From 9aeb3457e4ae3dbc1f8313a4dd38160419a3a57a Mon Sep 17 00:00:00 2001
From: Aphral Griffin
Date: Wed, 15 Feb 2023 11:03:41 +0000
Subject: [PATCH 1/5] draft fix api tests
---
go.work.sum | 328 +
scripts/docker/nginx-oss/ubuntu/Dockerfile | 1 +
sdk/proto/events/event.pb.go | 1 +
test/component/agent_api_test.go | 83 +-
test/integration/api/api_test.go | 31 +-
test/integration/go.mod | 21 +-
test/integration/go.sum | 110 +-
.../fsnotify/fsnotify/.editorconfig | 12 +
.../fsnotify/fsnotify/.gitattributes | 1 +
.../github.com/fsnotify/fsnotify/.gitignore | 6 +
.../github.com/fsnotify/fsnotify/.mailmap | 2 +
.../github.com/fsnotify/fsnotify/CHANGELOG.md | 470 ++
.../fsnotify/fsnotify/CONTRIBUTING.md | 26 +
.../github.com/fsnotify/fsnotify/LICENSE | 25 +
.../github.com/fsnotify/fsnotify/README.md | 161 +
.../fsnotify/fsnotify/backend_fen.go | 162 +
.../fsnotify/fsnotify/backend_inotify.go | 459 ++
.../fsnotify/fsnotify/backend_kqueue.go | 707 +++
.../fsnotify/fsnotify/backend_other.go | 66 +
.../fsnotify/fsnotify/backend_windows.go | 746 +++
.../github.com/fsnotify/fsnotify/fsnotify.go | 81 +
.../github.com/fsnotify/fsnotify/mkdoc.zsh | 208 +
.../fsnotify/fsnotify/system_bsd.go | 8 +
.../fsnotify/fsnotify/system_darwin.go | 9 +
.../go-grpc-middleware/retry/backoff.go | 44 +
.../go-grpc-middleware/retry/doc.go | 25 +
.../go-grpc-middleware/retry/options.go | 142 +
.../go-grpc-middleware/retry/retry.go | 329 ++
.../util/backoffutils/backoff.go | 28 +
.../go-grpc-middleware/util/metautils/doc.go | 19 +
.../util/metautils/nicemd.go | 126 +
.../go-grpc-middleware/validator/doc.go | 45 +
.../go-grpc-middleware/validator/validator.go | 90 +
.../github.com/hashicorp/hcl/.gitignore | 9 +
.../github.com/hashicorp/hcl/.travis.yml | 13 +
.../vendor/github.com/hashicorp/hcl/LICENSE | 354 ++
.../vendor/github.com/hashicorp/hcl/Makefile | 18 +
.../vendor/github.com/hashicorp/hcl/README.md | 125 +
.../github.com/hashicorp/hcl/appveyor.yml | 19 +
.../github.com/hashicorp/hcl/decoder.go | 729 +++
.../vendor/github.com/hashicorp/hcl/hcl.go | 11 +
.../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 +
.../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 +
.../hashicorp/hcl/hcl/parser/error.go | 17 +
.../hashicorp/hcl/hcl/parser/parser.go | 532 ++
.../hashicorp/hcl/hcl/printer/nodes.go | 789 +++
.../hashicorp/hcl/hcl/printer/printer.go | 66 +
.../hashicorp/hcl/hcl/scanner/scanner.go | 652 ++
.../hashicorp/hcl/hcl/strconv/quote.go | 241 +
.../hashicorp/hcl/hcl/token/position.go | 46 +
.../hashicorp/hcl/hcl/token/token.go | 219 +
.../hashicorp/hcl/json/parser/flatten.go | 117 +
.../hashicorp/hcl/json/parser/parser.go | 313 +
.../hashicorp/hcl/json/scanner/scanner.go | 451 ++
.../hashicorp/hcl/json/token/position.go | 46 +
.../hashicorp/hcl/json/token/token.go | 118 +
.../vendor/github.com/hashicorp/hcl/lex.go | 38 +
.../vendor/github.com/hashicorp/hcl/parse.go | 39 +
.../github.com/klauspost/cpuid/v2/.gitignore | 24 +
.../klauspost/cpuid/v2/.goreleaser.yml | 74 +
.../klauspost/cpuid/v2/CONTRIBUTING.txt | 35 +
.../github.com/klauspost/cpuid/v2/LICENSE | 22 +
.../github.com/klauspost/cpuid/v2/README.md | 258 +
.../github.com/klauspost/cpuid/v2/cpuid.go | 1262 ++++
.../github.com/klauspost/cpuid/v2/cpuid_386.s | 47 +
.../klauspost/cpuid/v2/cpuid_amd64.s | 72 +
.../klauspost/cpuid/v2/cpuid_arm64.s | 26 +
.../klauspost/cpuid/v2/detect_arm64.go | 247 +
.../klauspost/cpuid/v2/detect_ref.go | 15 +
.../klauspost/cpuid/v2/detect_x86.go | 36 +
.../klauspost/cpuid/v2/featureid_string.go | 233 +
.../klauspost/cpuid/v2/os_darwin_arm64.go | 121 +
.../klauspost/cpuid/v2/os_linux_arm64.go | 130 +
.../klauspost/cpuid/v2/os_other_arm64.go | 16 +
.../klauspost/cpuid/v2/os_safe_linux_arm64.go | 8 +
.../cpuid/v2/os_unsafe_linux_arm64.go | 11 +
.../klauspost/cpuid/v2/test-architectures.sh | 15 +
.../github.com/lufia/plan9stats/.gitignore | 12 +
.../github.com/lufia/plan9stats/LICENSE | 29 +
.../github.com/lufia/plan9stats/README.md | 13 +
.../vendor/github.com/lufia/plan9stats/cpu.go | 288 +
.../github.com/lufia/plan9stats/disk.go | 116 +
.../vendor/github.com/lufia/plan9stats/doc.go | 2 +
.../github.com/lufia/plan9stats/host.go | 223 +
.../vendor/github.com/lufia/plan9stats/int.go | 40 +
.../github.com/lufia/plan9stats/opts.go | 21 +
.../github.com/lufia/plan9stats/stats.go | 88 +
.../github.com/nginx/agent/sdk/v2/Makefile | 56 +
.../sdk/v2/agent/config/config_helpers.go | 44 +
.../github.com/nginx/agent/sdk/v2/backoff.go | 44 +
.../nginx/agent/sdk/v2/certificates.go | 52 +
.../nginx/agent/sdk/v2/checksum/checksum.go | 38 +
.../nginx/agent/sdk/v2/client/client.go | 93 +
.../nginx/agent/sdk/v2/client/commander.go | 350 ++
.../nginx/agent/sdk/v2/client/connect.go | 26 +
.../nginx/agent/sdk/v2/client/controller.go | 68 +
.../nginx/agent/sdk/v2/client/errors.go | 22 +
.../nginx/agent/sdk/v2/client/message.go | 91 +
.../agent/sdk/v2/client/metric_reporter.go | 210 +
.../nginx/agent/sdk/v2/config_apply.go | 260 +
.../nginx/agent/sdk/v2/config_helpers.go | 881 +++
.../nginx/agent/sdk/v2/files/file_helpers.go | 38 +
.../nginx/agent/sdk/v2/grpc/conts.go | 12 +
.../nginx/agent/sdk/v2/grpc/grpc.go | 193 +
.../nginx/agent/sdk/v2/grpc/meta.go | 42 +
.../nginx/agent/sdk/v2/interceptors/client.go | 116 +
.../agent/sdk/v2/interceptors/interceptors.go | 23 +
.../nginx/agent/sdk/v2/proto/agent.pb.go | 2922 +++++++++
.../nginx/agent/sdk/v2/proto/agent.proto | 135 +
.../nginx/agent/sdk/v2/proto/command.pb.go | 5256 +++++++++++++++++
.../nginx/agent/sdk/v2/proto/command.proto | 232 +
.../agent/sdk/v2/proto/command_svc.pb.go | 303 +
.../agent/sdk/v2/proto/command_svc.proto | 21 +
.../nginx/agent/sdk/v2/proto/common.pb.go | 3772 ++++++++++++
.../nginx/agent/sdk/v2/proto/common.proto | 146 +
.../agent/sdk/v2/proto/common/common.pb.go | 382 ++
.../agent/sdk/v2/proto/common/common.proto | 15 +
.../nginx/agent/sdk/v2/proto/config.pb.go | 696 +++
.../nginx/agent/sdk/v2/proto/config.proto | 25 +
.../sdk/v2/proto/dp_software_details.pb.go | 481 ++
.../sdk/v2/proto/dp_software_details.proto | 18 +
.../agent/sdk/v2/proto/events/event.pb.go | 4777 +++++++++++++++
.../agent/sdk/v2/proto/events/event.proto | 140 +
.../nginx/agent/sdk/v2/proto/host.pb.go | 3191 ++++++++++
.../nginx/agent/sdk/v2/proto/host.proto | 110 +
.../nginx/agent/sdk/v2/proto/metrics.pb.go | 1255 ++++
.../nginx/agent/sdk/v2/proto/metrics.proto | 56 +
.../agent/sdk/v2/proto/metrics.svc.pb.go | 236 +
.../agent/sdk/v2/proto/metrics.svc.proto | 17 +
.../nginx/agent/sdk/v2/proto/nap.pb.go | 815 +++
.../nginx/agent/sdk/v2/proto/nap.proto | 37 +
.../nginx/agent/sdk/v2/proto/nginx.pb.go | 3503 +++++++++++
.../nginx/agent/sdk/v2/proto/nginx.proto | 170 +
.../nginx/agent/sdk/v2/traverser.go | 99 +
.../nginx/agent/sdk/v2/zip/zipped_file.go | 226 +
.../nginx/agent/v2/src/core/checksum.go | 21 +
.../agent/v2/src/core/config/commands.go | 75 +
.../nginx/agent/v2/src/core/config/config.go | 458 ++
.../agent/v2/src/core/config/defaults.go | 469 ++
.../nginx/agent/v2/src/core/config/flags.go | 79 +
.../nginx/agent/v2/src/core/config/types.go | 138 +
.../nginx/agent/v2/src/core/environment.go | 621 ++
.../nginx/agent/v2/src/core/info.go | 28 +
.../nginx/agent/v2/src/core/message.go | 43 +
.../nginx/agent/v2/src/core/mock_pipe.go | 100 +
.../agent/v2/src/core/network/network.go | 405 ++
.../nginx/agent/v2/src/core/nginx.go | 847 +++
.../github.com/nginx/agent/v2/src/core/os.go | 42 +
.../nginx/agent/v2/src/core/pipe.go | 116 +
.../nginx/agent/v2/src/core/plugin.go | 16 +
.../nginx/agent/v2/src/core/process.go | 55 +
.../nginx/agent/v2/src/core/slice.go | 21 +
.../nginx/agent/v2/src/core/topics.go | 56 +
.../nginx/agent/v2/test/utils/agent_config.go | 146 +
.../agent/v2/test/utils/command_client.go | 126 +
.../nginx/agent/v2/test/utils/defaults.go | 12 +
.../nginx/agent/v2/test/utils/environment.go | 121 +
.../v2/test/utils/metrics_report_client.go | 87 +
.../nginx/agent/v2/test/utils/nginx.go | 171 +
.../nginx/agent/v2/test/utils/process.go | 31 +
.../nginx/agent/v2/test/utils/symbols.go | 40 +
.../agent/v2/test/utils/system/system.go | 41 +
.../nginx/agent/v2/test/utils/tls.go | 35 +
.../nginx-go-crossplane/.dockerignore | 1 +
.../nginxinc/nginx-go-crossplane/.gitignore | 34 +
.../nginx-go-crossplane/.gitlab-ci.yml | 35 +
.../nginx-go-crossplane/.golangci.yml | 84 +
.../nginxinc/nginx-go-crossplane/CREDITS | 1 +
.../nginxinc/nginx-go-crossplane/LICENSE | 201 +
.../nginxinc/nginx-go-crossplane/Makefile | 100 +
.../nginxinc/nginx-go-crossplane/README.md | 74 +
.../nginxinc/nginx-go-crossplane/analyze.go | 2333 ++++++++
.../nginxinc/nginx-go-crossplane/buffer.go | 51 +
.../nginxinc/nginx-go-crossplane/build.go | 284 +
.../nginxinc/nginx-go-crossplane/errors.go | 32 +
.../nginxinc/nginx-go-crossplane/lex.go | 222 +
.../nginxinc/nginx-go-crossplane/parse.go | 454 ++
.../nginxinc/nginx-go-crossplane/types.go | 134 +
.../nginxinc/nginx-go-crossplane/util.go | 164 +
.../pelletier/go-toml/v2/.dockerignore | 2 +
.../pelletier/go-toml/v2/.gitattributes | 4 +
.../pelletier/go-toml/v2/.gitignore | 6 +
.../pelletier/go-toml/v2/.golangci.toml | 84 +
.../pelletier/go-toml/v2/.goreleaser.yaml | 123 +
.../pelletier/go-toml/v2/CONTRIBUTING.md | 196 +
.../pelletier/go-toml/v2/Dockerfile | 5 +
.../github.com/pelletier/go-toml/v2/LICENSE | 21 +
.../github.com/pelletier/go-toml/v2/README.md | 552 ++
.../pelletier/go-toml/v2/SECURITY.md | 19 +
.../github.com/pelletier/go-toml/v2/ci.sh | 279 +
.../github.com/pelletier/go-toml/v2/decode.go | 544 ++
.../github.com/pelletier/go-toml/v2/doc.go | 2 +
.../github.com/pelletier/go-toml/v2/errors.go | 270 +
.../pelletier/go-toml/v2/internal/ast/ast.go | 144 +
.../go-toml/v2/internal/ast/builder.go | 51 +
.../pelletier/go-toml/v2/internal/ast/kind.go | 69 +
.../go-toml/v2/internal/danger/danger.go | 65 +
.../go-toml/v2/internal/danger/typeid.go | 23 +
.../go-toml/v2/internal/tracker/key.go | 50 +
.../go-toml/v2/internal/tracker/seen.go | 356 ++
.../go-toml/v2/internal/tracker/tracker.go | 1 +
.../pelletier/go-toml/v2/localtime.go | 120 +
.../pelletier/go-toml/v2/marshaler.go | 1040 ++++
.../github.com/pelletier/go-toml/v2/parser.go | 1086 ++++
.../pelletier/go-toml/v2/scanner.go | 269 +
.../github.com/pelletier/go-toml/v2/strict.go | 107 +
.../github.com/pelletier/go-toml/v2/toml.abnf | 243 +
.../github.com/pelletier/go-toml/v2/types.go | 14 +
.../pelletier/go-toml/v2/unmarshaler.go | 1227 ++++
.../github.com/pelletier/go-toml/v2/utf8.go | 240 +
.../github.com/power-devops/perfstat/LICENSE | 23 +
.../power-devops/perfstat/c_helpers.c | 159 +
.../power-devops/perfstat/c_helpers.h | 58 +
.../power-devops/perfstat/config.go | 18 +
.../power-devops/perfstat/cpustat.go | 138 +
.../power-devops/perfstat/diskstat.go | 137 +
.../github.com/power-devops/perfstat/doc.go | 315 +
.../power-devops/perfstat/fsstat.go | 31 +
.../power-devops/perfstat/helpers.go | 764 +++
.../power-devops/perfstat/lparstat.go | 26 +
.../power-devops/perfstat/lvmstat.go | 72 +
.../power-devops/perfstat/memstat.go | 84 +
.../power-devops/perfstat/netstat.go | 117 +
.../power-devops/perfstat/procstat.go | 75 +
.../power-devops/perfstat/sysconf.go | 195 +
.../power-devops/perfstat/systemcfg.go | 635 ++
.../power-devops/perfstat/types_cpu.go | 186 +
.../power-devops/perfstat/types_disk.go | 176 +
.../power-devops/perfstat/types_fs.go | 195 +
.../power-devops/perfstat/types_lpar.go | 68 +
.../power-devops/perfstat/types_lvm.go | 31 +
.../power-devops/perfstat/types_memory.go | 101 +
.../power-devops/perfstat/types_network.go | 163 +
.../power-devops/perfstat/types_process.go | 43 +
.../power-devops/perfstat/uptime.go | 35 +
.../github.com/shirou/gopsutil/v3/LICENSE | 61 +
.../github.com/shirou/gopsutil/v3/cpu/cpu.go | 200 +
.../shirou/gopsutil/v3/cpu/cpu_aix.go | 16 +
.../shirou/gopsutil/v3/cpu/cpu_aix_cgo.go | 66 +
.../shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go | 95 +
.../shirou/gopsutil/v3/cpu/cpu_darwin.go | 112 +
.../shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go | 111 +
.../gopsutil/v3/cpu/cpu_darwin_nocgo.go | 14 +
.../shirou/gopsutil/v3/cpu/cpu_dragonfly.go | 156 +
.../gopsutil/v3/cpu/cpu_dragonfly_amd64.go | 9 +
.../shirou/gopsutil/v3/cpu/cpu_fallback.go | 31 +
.../shirou/gopsutil/v3/cpu/cpu_freebsd.go | 168 +
.../shirou/gopsutil/v3/cpu/cpu_freebsd_386.go | 9 +
.../gopsutil/v3/cpu/cpu_freebsd_amd64.go | 9 +
.../shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go | 9 +
.../gopsutil/v3/cpu/cpu_freebsd_arm64.go | 9 +
.../shirou/gopsutil/v3/cpu/cpu_linux.go | 400 ++
.../shirou/gopsutil/v3/cpu/cpu_openbsd.go | 137 +
.../shirou/gopsutil/v3/cpu/cpu_openbsd_386.go | 10 +
.../gopsutil/v3/cpu/cpu_openbsd_amd64.go | 10 +
.../shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go | 10 +
.../gopsutil/v3/cpu/cpu_openbsd_arm64.go | 10 +
.../shirou/gopsutil/v3/cpu/cpu_plan9.go | 50 +
.../shirou/gopsutil/v3/cpu/cpu_solaris.go | 268 +
.../shirou/gopsutil/v3/cpu/cpu_windows.go | 233 +
.../shirou/gopsutil/v3/disk/disk.go | 96 +
.../shirou/gopsutil/v3/disk/disk_aix.go | 22 +
.../shirou/gopsutil/v3/disk/disk_aix_cgo.go | 76 +
.../shirou/gopsutil/v3/disk/disk_aix_nocgo.go | 18 +
.../shirou/gopsutil/v3/disk/disk_darwin.go | 87 +
.../gopsutil/v3/disk/disk_darwin_cgo.go | 45 +
.../gopsutil/v3/disk/disk_darwin_nocgo.go | 14 +
.../shirou/gopsutil/v3/disk/disk_fallback.go | 30 +
.../shirou/gopsutil/v3/disk/disk_freebsd.go | 193 +
.../gopsutil/v3/disk/disk_freebsd_386.go | 63 +
.../gopsutil/v3/disk/disk_freebsd_amd64.go | 66 +
.../gopsutil/v3/disk/disk_freebsd_arm.go | 63 +
.../gopsutil/v3/disk/disk_freebsd_arm64.go | 66 +
.../shirou/gopsutil/v3/disk/disk_linux.go | 538 ++
.../shirou/gopsutil/v3/disk/disk_openbsd.go | 159 +
.../gopsutil/v3/disk/disk_openbsd_386.go | 38 +
.../gopsutil/v3/disk/disk_openbsd_amd64.go | 36 +
.../gopsutil/v3/disk/disk_openbsd_arm.go | 38 +
.../gopsutil/v3/disk/disk_openbsd_arm64.go | 38 +
.../shirou/gopsutil/v3/disk/disk_solaris.go | 147 +
.../shirou/gopsutil/v3/disk/disk_unix.go | 62 +
.../shirou/gopsutil/v3/disk/disk_windows.go | 201 +
.../shirou/gopsutil/v3/disk/iostat_darwin.c | 129 +
.../shirou/gopsutil/v3/disk/iostat_darwin.h | 32 +
.../shirou/gopsutil/v3/host/host.go | 157 +
.../shirou/gopsutil/v3/host/host_bsd.go | 37 +
.../shirou/gopsutil/v3/host/host_darwin.go | 129 +
.../gopsutil/v3/host/host_darwin_amd64.go | 20 +
.../gopsutil/v3/host/host_darwin_arm64.go | 23 +
.../gopsutil/v3/host/host_darwin_cgo.go | 47 +
.../gopsutil/v3/host/host_darwin_nocgo.go | 14 +
.../shirou/gopsutil/v3/host/host_fallback.go | 50 +
.../shirou/gopsutil/v3/host/host_freebsd.go | 151 +
.../gopsutil/v3/host/host_freebsd_386.go | 37 +
.../gopsutil/v3/host/host_freebsd_amd64.go | 37 +
.../gopsutil/v3/host/host_freebsd_arm.go | 37 +
.../gopsutil/v3/host/host_freebsd_arm64.go | 40 +
.../shirou/gopsutil/v3/host/host_linux.go | 518 ++
.../shirou/gopsutil/v3/host/host_linux_386.go | 47 +
.../gopsutil/v3/host/host_linux_amd64.go | 50 +
.../shirou/gopsutil/v3/host/host_linux_arm.go | 45 +
.../gopsutil/v3/host/host_linux_arm64.go | 45 +
.../gopsutil/v3/host/host_linux_mips.go | 45 +
.../gopsutil/v3/host/host_linux_mips64.go | 45 +
.../gopsutil/v3/host/host_linux_mips64le.go | 45 +
.../gopsutil/v3/host/host_linux_mipsle.go | 45 +
.../gopsutil/v3/host/host_linux_ppc64le.go | 48 +
.../gopsutil/v3/host/host_linux_riscv64.go | 49 +
.../gopsutil/v3/host/host_linux_s390x.go | 48 +
.../shirou/gopsutil/v3/host/host_openbsd.go | 105 +
.../gopsutil/v3/host/host_openbsd_386.go | 34 +
.../gopsutil/v3/host/host_openbsd_amd64.go | 32 +
.../gopsutil/v3/host/host_openbsd_arm.go | 34 +
.../gopsutil/v3/host/host_openbsd_arm64.go | 34 +
.../shirou/gopsutil/v3/host/host_posix.go | 16 +
.../shirou/gopsutil/v3/host/host_solaris.go | 202 +
.../shirou/gopsutil/v3/host/host_windows.go | 279 +
.../shirou/gopsutil/v3/host/smc_darwin.c | 169 +
.../shirou/gopsutil/v3/host/smc_darwin.h | 32 +
.../shirou/gopsutil/v3/host/types.go | 24 +
.../gopsutil/v3/internal/common/binary.go | 636 ++
.../gopsutil/v3/internal/common/common.go | 391 ++
.../v3/internal/common/common_darwin.go | 66 +
.../v3/internal/common/common_freebsd.go | 82 +
.../v3/internal/common/common_linux.go | 294 +
.../v3/internal/common/common_openbsd.go | 66 +
.../v3/internal/common/common_unix.go | 62 +
.../v3/internal/common/common_windows.go | 301 +
.../gopsutil/v3/internal/common/endian.go | 10 +
.../gopsutil/v3/internal/common/sleep.go | 18 +
.../github.com/shirou/gopsutil/v3/mem/mem.go | 118 +
.../shirou/gopsutil/v3/mem/mem_aix.go | 16 +
.../shirou/gopsutil/v3/mem/mem_aix_cgo.go | 51 +
.../shirou/gopsutil/v3/mem/mem_aix_nocgo.go | 81 +
.../shirou/gopsutil/v3/mem/mem_bsd.go | 87 +
.../shirou/gopsutil/v3/mem/mem_darwin.go | 71 +
.../shirou/gopsutil/v3/mem/mem_darwin_cgo.go | 58 +
.../gopsutil/v3/mem/mem_darwin_nocgo.go | 89 +
.../shirou/gopsutil/v3/mem/mem_fallback.go | 34 +
.../shirou/gopsutil/v3/mem/mem_freebsd.go | 168 +
.../shirou/gopsutil/v3/mem/mem_linux.go | 525 ++
.../shirou/gopsutil/v3/mem/mem_openbsd.go | 99 +
.../shirou/gopsutil/v3/mem/mem_openbsd_386.go | 38 +
.../gopsutil/v3/mem/mem_openbsd_amd64.go | 32 +
.../shirou/gopsutil/v3/mem/mem_openbsd_arm.go | 38 +
.../gopsutil/v3/mem/mem_openbsd_arm64.go | 38 +
.../shirou/gopsutil/v3/mem/mem_plan9.go | 68 +
.../shirou/gopsutil/v3/mem/mem_solaris.go | 186 +
.../shirou/gopsutil/v3/mem/mem_windows.go | 166 +
.../github.com/shirou/gopsutil/v3/net/net.go | 273 +
.../shirou/gopsutil/v3/net/net_aix.go | 330 ++
.../shirou/gopsutil/v3/net/net_aix_cgo.go | 36 +
.../shirou/gopsutil/v3/net/net_aix_nocgo.go | 95 +
.../shirou/gopsutil/v3/net/net_darwin.go | 291 +
.../shirou/gopsutil/v3/net/net_fallback.go | 93 +
.../shirou/gopsutil/v3/net/net_freebsd.go | 128 +
.../shirou/gopsutil/v3/net/net_linux.go | 911 +++
.../shirou/gopsutil/v3/net/net_linux_111.go | 12 +
.../shirou/gopsutil/v3/net/net_linux_116.go | 12 +
.../shirou/gopsutil/v3/net/net_openbsd.go | 319 +
.../shirou/gopsutil/v3/net/net_unix.go | 224 +
.../shirou/gopsutil/v3/net/net_windows.go | 778 +++
.../shirou/gopsutil/v3/process/process.go | 620 ++
.../shirou/gopsutil/v3/process/process_bsd.go | 76 +
.../gopsutil/v3/process/process_darwin.go | 326 +
.../v3/process/process_darwin_amd64.go | 236 +
.../v3/process/process_darwin_arm64.go | 213 +
.../gopsutil/v3/process/process_darwin_cgo.go | 219 +
.../v3/process/process_darwin_nocgo.go | 127 +
.../gopsutil/v3/process/process_fallback.go | 203 +
.../gopsutil/v3/process/process_freebsd.go | 338 ++
.../v3/process/process_freebsd_386.go | 192 +
.../v3/process/process_freebsd_amd64.go | 192 +
.../v3/process/process_freebsd_arm.go | 192 +
.../v3/process/process_freebsd_arm64.go | 202 +
.../gopsutil/v3/process/process_linux.go | 1189 ++++
.../gopsutil/v3/process/process_openbsd.go | 389 ++
.../v3/process/process_openbsd_386.go | 202 +
.../v3/process/process_openbsd_amd64.go | 200 +
.../v3/process/process_openbsd_arm.go | 202 +
.../v3/process/process_openbsd_arm64.go | 203 +
.../gopsutil/v3/process/process_plan9.go | 203 +
.../gopsutil/v3/process/process_posix.go | 184 +
.../gopsutil/v3/process/process_solaris.go | 304 +
.../gopsutil/v3/process/process_windows.go | 1169 ++++
.../v3/process/process_windows_32bit.go | 109 +
.../v3/process/process_windows_64bit.go | 79 +
.../vendor/github.com/spf13/afero/.gitignore | 2 +
.../vendor/github.com/spf13/afero/LICENSE.txt | 174 +
.../vendor/github.com/spf13/afero/README.md | 442 ++
.../vendor/github.com/spf13/afero/afero.go | 111 +
.../github.com/spf13/afero/appveyor.yml | 10 +
.../vendor/github.com/spf13/afero/basepath.go | 223 +
.../github.com/spf13/afero/cacheOnReadFs.go | 315 +
.../github.com/spf13/afero/const_bsds.go | 23 +
.../github.com/spf13/afero/const_win_unix.go | 22 +
.../github.com/spf13/afero/copyOnWriteFs.go | 326 +
.../vendor/github.com/spf13/afero/httpFs.go | 114 +
.../spf13/afero/internal/common/adapters.go | 27 +
.../vendor/github.com/spf13/afero/iofs.go | 298 +
.../vendor/github.com/spf13/afero/ioutil.go | 240 +
.../vendor/github.com/spf13/afero/lstater.go | 27 +
.../vendor/github.com/spf13/afero/match.go | 110 +
.../vendor/github.com/spf13/afero/mem/dir.go | 37 +
.../github.com/spf13/afero/mem/dirmap.go | 43 +
.../vendor/github.com/spf13/afero/mem/file.go | 356 ++
.../vendor/github.com/spf13/afero/memmap.go | 404 ++
.../vendor/github.com/spf13/afero/os.go | 113 +
.../vendor/github.com/spf13/afero/path.go | 106 +
.../github.com/spf13/afero/readonlyfs.go | 96 +
.../vendor/github.com/spf13/afero/regexpfs.go | 224 +
.../vendor/github.com/spf13/afero/symlink.go | 55 +
.../github.com/spf13/afero/unionFile.go | 331 ++
.../vendor/github.com/spf13/afero/util.go | 330 ++
.../vendor/github.com/spf13/cast/.gitignore | 25 +
.../vendor/github.com/spf13/cast/LICENSE | 21 +
.../vendor/github.com/spf13/cast/Makefile | 40 +
.../vendor/github.com/spf13/cast/README.md | 75 +
.../vendor/github.com/spf13/cast/cast.go | 176 +
.../vendor/github.com/spf13/cast/caste.go | 1476 +++++
.../spf13/cast/timeformattype_string.go | 27 +
.../spf13/jwalterweatherman/.gitignore | 24 +
.../spf13/jwalterweatherman/LICENSE | 21 +
.../spf13/jwalterweatherman/README.md | 148 +
.../jwalterweatherman/default_notepad.go | 111 +
.../spf13/jwalterweatherman/log_counter.go | 46 +
.../spf13/jwalterweatherman/notepad.go | 225 +
.../github.com/spf13/viper/.editorconfig | 15 +
.../vendor/github.com/spf13/viper/.gitignore | 5 +
.../github.com/spf13/viper/.golangci.yaml | 96 +
.../vendor/github.com/spf13/viper/LICENSE | 21 +
.../vendor/github.com/spf13/viper/Makefile | 76 +
.../vendor/github.com/spf13/viper/README.md | 881 +++
.../github.com/spf13/viper/TROUBLESHOOTING.md | 32 +
.../spf13/viper/experimental_logger.go | 11 +
.../vendor/github.com/spf13/viper/flags.go | 57 +
.../vendor/github.com/spf13/viper/fs.go | 65 +
.../spf13/viper/internal/encoding/decoder.go | 61 +
.../viper/internal/encoding/dotenv/codec.go | 61 +
.../internal/encoding/dotenv/map_utils.go | 41 +
.../spf13/viper/internal/encoding/encoder.go | 60 +
.../spf13/viper/internal/encoding/error.go | 7 +
.../viper/internal/encoding/hcl/codec.go | 40 +
.../viper/internal/encoding/ini/codec.go | 99 +
.../viper/internal/encoding/ini/map_utils.go | 74 +
.../internal/encoding/javaproperties/codec.go | 86 +
.../encoding/javaproperties/map_utils.go | 74 +
.../viper/internal/encoding/json/codec.go | 17 +
.../viper/internal/encoding/toml/codec.go | 39 +
.../viper/internal/encoding/toml/codec2.go | 19 +
.../viper/internal/encoding/yaml/codec.go | 14 +
.../viper/internal/encoding/yaml/yaml2.go | 14 +
.../viper/internal/encoding/yaml/yaml3.go | 14 +
.../vendor/github.com/spf13/viper/logger.go | 77 +
.../vendor/github.com/spf13/viper/util.go | 217 +
.../vendor/github.com/spf13/viper/viper.go | 2149 +++++++
.../github.com/spf13/viper/viper_go1_15.go | 57 +
.../github.com/spf13/viper/viper_go1_16.go | 32 +
.../vendor/github.com/spf13/viper/watch.go | 12 +
.../spf13/viper/watch_unsupported.go | 32 +
.../github.com/stretchr/objx/.codeclimate.yml | 21 +
.../github.com/stretchr/objx/.gitignore | 11 +
.../vendor/github.com/stretchr/objx/LICENSE | 22 +
.../vendor/github.com/stretchr/objx/README.md | 80 +
.../github.com/stretchr/objx/Taskfile.yml | 30 +
.../github.com/stretchr/objx/accessors.go | 197 +
.../github.com/stretchr/objx/conversions.go | 280 +
.../vendor/github.com/stretchr/objx/doc.go | 66 +
.../vendor/github.com/stretchr/objx/map.go | 215 +
.../github.com/stretchr/objx/mutations.go | 77 +
.../github.com/stretchr/objx/security.go | 12 +
.../vendor/github.com/stretchr/objx/tests.go | 17 +
.../github.com/stretchr/objx/type_specific.go | 346 ++
.../stretchr/objx/type_specific_codegen.go | 2261 +++++++
.../vendor/github.com/stretchr/objx/value.go | 159 +
.../github.com/stretchr/testify/mock/doc.go | 44 +
.../github.com/stretchr/testify/mock/mock.go | 1098 ++++
.../vendor/github.com/subosito/gotenv/.env | 1 +
.../github.com/subosito/gotenv/.env.invalid | 1 +
.../github.com/subosito/gotenv/.gitignore | 4 +
.../github.com/subosito/gotenv/.golangci.yaml | 7 +
.../github.com/subosito/gotenv/CHANGELOG.md | 68 +
.../vendor/github.com/subosito/gotenv/LICENSE | 21 +
.../github.com/subosito/gotenv/README.md | 129 +
.../github.com/subosito/gotenv/gotenv.go | 369 ++
.../github.com/vardius/message-bus/.gitignore | 18 +
.../github.com/vardius/message-bus/.hound.yml | 2 +
.../vardius/message-bus/.travis.yml | 10 +
.../github.com/vardius/message-bus/LICENSE.md | 21 +
.../github.com/vardius/message-bus/README.md | 61 +
.../github.com/vardius/message-bus/bus.go | 145 +
.../github.com/vardius/message-bus/doc.go | 4 +
.../vendor/golang.org/x/text/runes/cond.go | 187 +
.../vendor/golang.org/x/text/runes/runes.go | 355 ++
.../vendor/gopkg.in/ini.v1/.editorconfig | 12 +
.../vendor/gopkg.in/ini.v1/.gitignore | 7 +
.../vendor/gopkg.in/ini.v1/.golangci.yml | 27 +
.../vendor/gopkg.in/ini.v1/LICENSE | 191 +
.../vendor/gopkg.in/ini.v1/Makefile | 15 +
.../vendor/gopkg.in/ini.v1/README.md | 43 +
.../vendor/gopkg.in/ini.v1/codecov.yml | 16 +
.../vendor/gopkg.in/ini.v1/data_source.go | 76 +
.../vendor/gopkg.in/ini.v1/deprecated.go | 22 +
.../vendor/gopkg.in/ini.v1/error.go | 49 +
.../vendor/gopkg.in/ini.v1/file.go | 541 ++
.../vendor/gopkg.in/ini.v1/helper.go | 24 +
.../integration/vendor/gopkg.in/ini.v1/ini.go | 176 +
.../integration/vendor/gopkg.in/ini.v1/key.go | 837 +++
.../vendor/gopkg.in/ini.v1/parser.go | 520 ++
.../vendor/gopkg.in/ini.v1/section.go | 256 +
.../vendor/gopkg.in/ini.v1/struct.go | 747 +++
test/integration/vendor/modules.txt | 105 +-
test/performance/go.mod | 1 +
test/performance/go.sum | 2 +
.../github.com/go-resty/resty/v2/.gitignore | 30 +
.../github.com/go-resty/resty/v2/BUILD.bazel | 48 +
.../github.com/go-resty/resty/v2/LICENSE | 21 +
.../github.com/go-resty/resty/v2/README.md | 906 +++
.../github.com/go-resty/resty/v2/WORKSPACE | 31 +
.../github.com/go-resty/resty/v2/client.go | 1115 ++++
.../go-resty/resty/v2/middleware.go | 543 ++
.../github.com/go-resty/resty/v2/redirect.go | 101 +
.../github.com/go-resty/resty/v2/request.go | 896 +++
.../github.com/go-resty/resty/v2/response.go | 175 +
.../github.com/go-resty/resty/v2/resty.go | 40 +
.../github.com/go-resty/resty/v2/retry.go | 221 +
.../github.com/go-resty/resty/v2/trace.go | 130 +
.../github.com/go-resty/resty/v2/transport.go | 35 +
.../go-resty/resty/v2/transport112.go | 34 +
.../github.com/go-resty/resty/v2/util.go | 391 ++
.../agent/sdk/v2/proto/events/event.pb.go | 1 +
.../v2/test/utils/api_process_response.go | 33 +
.../x/net/publicsuffix/data/children | Bin 0 -> 2876 bytes
.../golang.org/x/net/publicsuffix/data/nodes | Bin 0 -> 48280 bytes
.../golang.org/x/net/publicsuffix/data/text | 1 +
.../golang.org/x/net/publicsuffix/list.go | 203 +
.../golang.org/x/net/publicsuffix/table.go | 70 +
test/performance/vendor/modules.txt | 4 +
test/utils/api_process_response.go | 33 +
.../agent/sdk/v2/proto/events/event.pb.go | 1 +
540 files changed, 113375 insertions(+), 58 deletions(-)
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/.editorconfig
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/.gitattributes
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/.gitignore
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/.mailmap
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/LICENSE
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/README.md
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/backend_fen.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/backend_other.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/backend_windows.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/fsnotify.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/system_bsd.go
create mode 100644 test/integration/vendor/github.com/fsnotify/fsnotify/system_darwin.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/doc.go
create mode 100644 test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/validator.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/.gitignore
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/.travis.yml
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/LICENSE
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/Makefile
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/README.md
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/appveyor.yml
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/decoder.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/token/position.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/hcl/token/token.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/json/parser/parser.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/json/token/position.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/json/token/token.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/lex.go
create mode 100644 test/integration/vendor/github.com/hashicorp/hcl/parse.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/.gitignore
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/LICENSE
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/README.md
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
create mode 100644 test/integration/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/.gitignore
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/LICENSE
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/README.md
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/cpu.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/disk.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/doc.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/host.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/int.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/opts.go
create mode 100644 test/integration/vendor/github.com/lufia/plan9stats/stats.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/Makefile
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/agent/config/config_helpers.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/backoff.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/certificates.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/checksum/checksum.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/client.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/commander.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/connect.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/controller.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/errors.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/message.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/client/metric_reporter.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/config_apply.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/config_helpers.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/files/file_helpers.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/conts.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/grpc.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/meta.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/client.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/interceptors.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.pb.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.proto
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/traverser.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/sdk/v2/zip/zipped_file.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/checksum.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/config/commands.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/config/config.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/config/defaults.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/config/flags.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/config/types.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/environment.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/info.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/message.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/mock_pipe.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/network/network.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/nginx.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/os.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/pipe.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/plugin.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/process.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/slice.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/src/core/topics.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/agent_config.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/command_client.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/defaults.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/environment.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/metrics_report_client.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/nginx.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/process.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/symbols.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/system/system.go
create mode 100644 test/integration/vendor/github.com/nginx/agent/v2/test/utils/tls.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.dockerignore
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitignore
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitlab-ci.yml
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.golangci.yml
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/CREDITS
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/LICENSE
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/Makefile
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/README.md
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/analyze.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/buffer.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/build.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/errors.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/lex.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/parse.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/types.go
create mode 100644 test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/util.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/.dockerignore
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/.gitattributes
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/.gitignore
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/.golangci.toml
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/Dockerfile
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/LICENSE
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/ci.sh
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/doc.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/toml.abnf
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/LICENSE
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/c_helpers.c
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/c_helpers.h
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/config.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/cpustat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/diskstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/doc.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/fsstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/helpers.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/lparstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/lvmstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/memstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/netstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/procstat.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/sysconf.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/systemcfg.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_cpu.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_disk.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_fs.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_lpar.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_lvm.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_memory.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_network.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/types_process.go
create mode 100644 test/integration/vendor/github.com/power-devops/perfstat/uptime.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/LICENSE
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/host/types.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go
create mode 100644 test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/.gitignore
create mode 100644 test/integration/vendor/github.com/spf13/afero/LICENSE.txt
create mode 100644 test/integration/vendor/github.com/spf13/afero/README.md
create mode 100644 test/integration/vendor/github.com/spf13/afero/afero.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/appveyor.yml
create mode 100644 test/integration/vendor/github.com/spf13/afero/basepath.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/cacheOnReadFs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/const_bsds.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/const_win_unix.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/copyOnWriteFs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/httpFs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/internal/common/adapters.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/iofs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/ioutil.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/lstater.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/match.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/mem/dir.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/mem/dirmap.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/mem/file.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/memmap.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/os.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/path.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/readonlyfs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/regexpfs.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/symlink.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/unionFile.go
create mode 100644 test/integration/vendor/github.com/spf13/afero/util.go
create mode 100644 test/integration/vendor/github.com/spf13/cast/.gitignore
create mode 100644 test/integration/vendor/github.com/spf13/cast/LICENSE
create mode 100644 test/integration/vendor/github.com/spf13/cast/Makefile
create mode 100644 test/integration/vendor/github.com/spf13/cast/README.md
create mode 100644 test/integration/vendor/github.com/spf13/cast/cast.go
create mode 100644 test/integration/vendor/github.com/spf13/cast/caste.go
create mode 100644 test/integration/vendor/github.com/spf13/cast/timeformattype_string.go
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/.gitignore
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/LICENSE
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/README.md
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/log_counter.go
create mode 100644 test/integration/vendor/github.com/spf13/jwalterweatherman/notepad.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/.editorconfig
create mode 100644 test/integration/vendor/github.com/spf13/viper/.gitignore
create mode 100644 test/integration/vendor/github.com/spf13/viper/.golangci.yaml
create mode 100644 test/integration/vendor/github.com/spf13/viper/LICENSE
create mode 100644 test/integration/vendor/github.com/spf13/viper/Makefile
create mode 100644 test/integration/vendor/github.com/spf13/viper/README.md
create mode 100644 test/integration/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
create mode 100644 test/integration/vendor/github.com/spf13/viper/experimental_logger.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/flags.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/fs.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/decoder.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/encoder.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/error.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/json/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/logger.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/util.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/viper.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/viper_go1_15.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/viper_go1_16.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/watch.go
create mode 100644 test/integration/vendor/github.com/spf13/viper/watch_unsupported.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/.codeclimate.yml
create mode 100644 test/integration/vendor/github.com/stretchr/objx/.gitignore
create mode 100644 test/integration/vendor/github.com/stretchr/objx/LICENSE
create mode 100644 test/integration/vendor/github.com/stretchr/objx/README.md
create mode 100644 test/integration/vendor/github.com/stretchr/objx/Taskfile.yml
create mode 100644 test/integration/vendor/github.com/stretchr/objx/accessors.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/conversions.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/doc.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/map.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/mutations.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/security.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/tests.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/type_specific.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/type_specific_codegen.go
create mode 100644 test/integration/vendor/github.com/stretchr/objx/value.go
create mode 100644 test/integration/vendor/github.com/stretchr/testify/mock/doc.go
create mode 100644 test/integration/vendor/github.com/stretchr/testify/mock/mock.go
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/.env
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/.env.invalid
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/.gitignore
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/.golangci.yaml
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/CHANGELOG.md
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/LICENSE
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/README.md
create mode 100644 test/integration/vendor/github.com/subosito/gotenv/gotenv.go
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/.gitignore
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/.hound.yml
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/.travis.yml
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/LICENSE.md
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/README.md
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/bus.go
create mode 100644 test/integration/vendor/github.com/vardius/message-bus/doc.go
create mode 100644 test/integration/vendor/golang.org/x/text/runes/cond.go
create mode 100644 test/integration/vendor/golang.org/x/text/runes/runes.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/.editorconfig
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/.gitignore
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/.golangci.yml
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/LICENSE
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/Makefile
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/README.md
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/codecov.yml
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/data_source.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/deprecated.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/error.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/file.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/helper.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/ini.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/key.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/parser.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/section.go
create mode 100644 test/integration/vendor/gopkg.in/ini.v1/struct.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/.gitignore
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/BUILD.bazel
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/LICENSE
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/README.md
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/WORKSPACE
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/client.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/middleware.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/redirect.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/request.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/response.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/resty.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/retry.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/trace.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/transport.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/transport112.go
create mode 100644 test/performance/vendor/github.com/go-resty/resty/v2/util.go
create mode 100644 test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
create mode 100644 test/performance/vendor/golang.org/x/net/publicsuffix/data/children
create mode 100644 test/performance/vendor/golang.org/x/net/publicsuffix/data/nodes
create mode 100644 test/performance/vendor/golang.org/x/net/publicsuffix/data/text
create mode 100644 test/performance/vendor/golang.org/x/net/publicsuffix/list.go
create mode 100644 test/performance/vendor/golang.org/x/net/publicsuffix/table.go
create mode 100644 test/utils/api_process_response.go
diff --git a/go.work.sum b/go.work.sum
index 8046fc5c5..ba0312636 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -119,26 +119,41 @@ cloud.google.com/go/websecurityscanner v1.4.0 h1:y7yIFg/h/mO+5Y5aCOtVAnpGUOgqCH5
cloud.google.com/go/workflows v1.9.0 h1:7Chpin9p50NTU8Tb7qk+I11U/IwVXmDhEoSsdccvInE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 h1:V8krnnfGj4pV65YLUm3C0/8bl7V5Nry2Pwvy3ru/wLc=
+github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw=
+github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI=
github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
+github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
@@ -147,6 +162,7 @@ github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloD
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aslakhellesoy/gox v1.0.100/go.mod h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M=
github.com/aws/aws-sdk-go v1.43.16 h1:Y7wBby44f+tINqJjw5fLH3vA+gFq4uMITIKqditwM14=
github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
@@ -167,34 +183,63 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 h1:LCQKnopq2t4oQS3VKivlYTzAHCTJZ
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 h1:Uw5wBybFQ1UeA9ts0Y07gbv0ncZnIAyw858tDW0NP2o=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 h1:+xtV90n3abQmgzk1pS++FdxZTrPEDgQng6e4/56WR2A=
github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
+github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
+github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
+github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY=
+github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
+github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/compose-spec/compose-go v1.8.2 h1:sUQvDxnPgpcOyoxC/lz7mFTrTlHeZ6LWyuASYetkOqw=
+github.com/compose-spec/compose-go v1.8.2/go.mod h1:Tb5Ae2PsYN3GTqYqzl2IRbTPiJtPZZjMw8UKUvmehFk=
github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
github.com/containerd/btrfs v1.0.0 h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA=
+github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.6.14 h1:W+d0AJKVG3ioTZZyQwcw1Y3vvo6ZDYzAcjDcY4tkgGI=
+github.com/containerd/containerd v1.6.14/go.mod h1:U2NnBPIhzJDm59xF7xB2MMHnKtggpZ+phKg8o2TKj2c=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 h1:Xy9Tkx0tk/SsMfLDFc69wzqSrxQHYEFELHBO/Z8XO3M=
github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo=
github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
github.com/containerd/imgcrypt v1.1.4 h1:iKTstFebwy3Ak5UF0RHSeuCTahC5OIrPJa6vjMAM81s=
github.com/containerd/nri v0.1.0 h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ=
github.com/containerd/stargz-snapshotter v0.12.0 h1:SRKo+YxmypnlyC7eKc9KNW0Ciy1Auo102s8E/aRGWKg=
+github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/zfs v1.0.0 h1:cXLJbx+4Jj7rNsTiqVfm6i+RNLx6FFA2fMmDlEf+Wm8=
github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
@@ -206,8 +251,11 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cucumber/gherkin-go/v11 v11.0.0/go.mod h1:CX33k2XU2qog4e+TFjOValoq6mIUq0DmVccZs238R9w=
github.com/cucumber/gherkin-go/v19 v19.0.3 h1:mMSKu1077ffLbTJULUfM5HPokgeBcIGboyeNUof1MdE=
github.com/cucumber/godog v0.11.0 h1:xgaWyJuAD6A+aW4TfVGNDBhuMyKW0jjl0cvY3KNxEak=
@@ -216,30 +264,74 @@ github.com/cucumber/messages-go/v10 v10.0.1/go.mod h1:kA5T38CBlBbYLU12TIrJ4fk4wS
github.com/cucumber/messages-go/v10 v10.0.3/go.mod h1:9jMZ2Y8ZxjLY6TG2+x344nt5rXstVVDYSdS5ySfI1WY=
github.com/cucumber/messages-go/v16 v16.0.1 h1:fvkpwsLgnIm0qugftrw2YwNlio+ABe2Iu94Ap8GMYIY=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 h1:OGNva6WhsKst5OZf7eZOklDztV3hwtTHovdrLHV+MsA=
+github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/distribution/distribution/v3 v3.0.0-20221201083218-92d136e113cf h1:q0uyPLfHgAu2Mke31RRXErLcAbuBguRpJugWxjMNRnQ=
+github.com/distribution/distribution/v3 v3.0.0-20221201083218-92d136e113cf/go.mod h1:4x0IxAMsdeCSTr9UopCvp6MnryD2nyRLycsOrgvveAs=
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
+github.com/docker/buildx v0.9.1 h1:VLyyJ1s4seWHmBY+oNc0J4+jHluRHLyv8KHOYYdSu3U=
+github.com/docker/buildx v0.9.1/go.mod h1:zHiKsXO4H8cgUQFaNCqitRroVta3Kj9TVzhMhe5GLlk=
+github.com/docker/cli v20.10.3-0.20221013132413-1d6c6e2367e2+incompatible h1:je9pK1shVhf561ujVYyn8ycOEfbgXFXryWxNqkYqsxg=
+github.com/docker/cli v20.10.3-0.20221013132413-1d6c6e2367e2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.5.1 h1:jIk/cCZurZERhALPVKhqlNxTQGxn2kcI+56gE57PQXg=
+github.com/docker/compose/v2 v2.15.0 h1:kdeilqTSrWBcek4MGTlRUZ8GgXgzJZDtMoeVNiFxdpU=
+github.com/docker/compose/v2 v2.15.0/go.mod h1:YcXtjGuMqyzWiDLIZ1/Jl9o3LV8hQyj1/znCFMhyBb4=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.3-0.20221013203545-33ab36d6b304+incompatible h1:ieHXawdo9MXKnRkKuVWEfEN3PDQUqIjz/T8vMfIaHkM=
+github.com/docker/docker v20.10.3-0.20221013203545-33ab36d6b304+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
+github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae h1:UTOyRlLeWJrZx+ynml6q6qzZ1uDkJe/0Z5CMZRbEIJg=
+github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
+github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd h1:hSkbZ9XSyjyBirMeqSqUrK+9HboWrweVlzRNqoBi2d4=
@@ -254,36 +346,65 @@ github.com/gobuffalo/mapi v1.0.2 h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk=
github.com/gobuffalo/packd v0.1.0 h1:4sGKOD8yaYJ+dek1FDkwcxCHA40M4kfKgFHx8N2kwbU=
github.com/gobuffalo/packr/v2 v2.2.0 h1:Ir9W9XIm9j7bhhkKE9cokvtTl1vBm62A/fene/ZCj6A=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754 h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
+github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d h1:ibbzF2InxMOS+lLCphY9PHNKPURDUBNKaG6ErSq8gJQ=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 h1:kgvybwEeu0SXktbB2y3uLHX9lklLo+nzUwh59A3jzQc=
@@ -297,6 +418,8 @@ github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V
github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
@@ -306,6 +429,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -315,34 +440,66 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo=
+github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
+github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 h1:rcanfLhLDA8nozr/K289V1zcntHr3V+SHlXwzz1ZI2g=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE=
+github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
+github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/karrick/godirwalk v1.10.3 h1:lOpSw2vJP0y5eLBW906QwKsUK/fe/QDyoqM5rnnuPDY=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f h1:I1iYfgQavGa2tgdgKn+2Qg1yQhHEETvh/mNSxG3x5c0=
+github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.6.0 h1:TDwTWbeII+88Qy55nWlof0DclgAtI4LqGujkYMzmQII=
+github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -351,62 +508,147 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzC
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
+github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a h1:NI01Z14Hbwo1MHq8ylu4HNkmKGnhk8UZsD6c6FVMcA8=
+github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a/go.mod h1:Wa+LkeUQ9NJTVXTAY38rhkfKVQcuCIo2fbavRSuGsbI=
+github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
+github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
+github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.8.0 h1:xtk0uUHVWVsRBdEUGYBym4CXbcllXky2M7Qlwsf8C0Y=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
+github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
+github.com/testcontainers/testcontainers-go v0.17.0 h1:UdKSw2DJXinlS6ijbFb4VHpQzD+EfTwcTq1/19a+8PU=
+github.com/testcontainers/testcontainers-go v0.17.0/go.mod h1:n5trpHrB68IUelEqGNC8VipaCo6jOGusU44kIK11XRs=
+github.com/testcontainers/testcontainers-go/modules/compose v0.0.0-20230112145122-126aeb9ca036 h1:bILs+rbe2IHq7evF0RK+UNq7VRD5VzUEMXh+tygph3c=
+github.com/testcontainers/testcontainers-go/modules/compose v0.0.0-20230112145122-126aeb9ca036/go.mod h1:zrMEx8hFhl2khJpC640DLNuNcwJ4uWhFQG6DbPIsazk=
+github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
+github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tonistiigi/fsutil v0.0.0-20220930225714-4638ad635be5 h1:NJ1nZs4j4XcBJKIY5sAwTGp9w5b78Zxr3+r0zXRuKnA=
+github.com/tonistiigi/fsutil v0.0.0-20220930225714-4638ad635be5/go.mod h1:F83XRhNblQsKQH9hcKEE45GAOkL9590mtw9KsD0Q4fE=
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc=
github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0=
+github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
+github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
+github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f h1:DLpt6B5oaaS8jyXHa9VA4rrZloBVPVXeCtrOsrFauxc=
+github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
@@ -421,9 +663,38 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6
go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U=
+go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
+go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
go.opentelemetry.io/otel/exporters/jaeger v1.4.1 h1:VHCK+2yTZDqDaVXj7JH2Z/khptuydo6C0ttBh2bxAbc=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 h1:AxqDiGk8CorEXStMDZF5Hz9vo9Z7ZZ+I5m8JRl/ko40=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 h1:8qOago/OqoFclMUUj/184tZyRdDZFpcejSjbk5Jrl6Y=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M=
go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk=
+go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
+go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs=
+go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
+go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
+go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
+go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20221211140036-ad323defaf05 h1:T8EldfGCcveFMewH5xAYxxoX3PSQMrsechlUGVFlQBU=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
@@ -431,34 +702,91 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools/gotestsum v1.8.2 h1:szU3TaSz8wMx/uG+w/A2+4JUPwH903YYaMI9yOOYAyI=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
+k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
+k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
+k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
+k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
k8s.io/apiserver v0.22.5 h1:71krQxCUz218ecb+nPhfDsNB6QgP1/4EMvi1a2uYBlg=
+k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
+k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
k8s.io/component-base v0.22.5 h1:U0eHqZm7mAFE42hFwYhY6ze/MmVaW00JpMrzVsQmzYE=
k8s.io/cri-api v0.25.0 h1:INwdXsCDSA/0hGNdPxdE2dQD6ft/5K1EaKXZixvSQxg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
+k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/scripts/docker/nginx-oss/ubuntu/Dockerfile b/scripts/docker/nginx-oss/ubuntu/Dockerfile
index d434807da..18a774974 100644
--- a/scripts/docker/nginx-oss/ubuntu/Dockerfile
+++ b/scripts/docker/nginx-oss/ubuntu/Dockerfile
@@ -2,6 +2,7 @@ ARG BASE_IMAGE
FROM ${BASE_IMAGE} as install-nginx
LABEL maintainer="NGINX Agent Maintainers "
+ARG DEBIAN_FRONTEND=noninteractive
ARG NGINX_CONF
ARG ENTRY_POINT
diff --git a/sdk/proto/events/event.pb.go b/sdk/proto/events/event.pb.go
index 2df52620d..e054b5c00 100644
--- a/sdk/proto/events/event.pb.go
+++ b/sdk/proto/events/event.pb.go
@@ -133,6 +133,7 @@ type Event struct {
// Event metadata
Metadata *Metadata `protobuf:"bytes,1,opt,name=Metadata,proto3" json:"metadata"`
// Types that are valid to be assigned to Data:
+ //
// *Event_ActivityEvent
// *Event_SecurityViolationEvent
Data isEvent_Data `protobuf_oneof:"data"`
diff --git a/test/component/agent_api_test.go b/test/component/agent_api_test.go
index 8fcf164c6..f079709b7 100644
--- a/test/component/agent_api_test.go
+++ b/test/component/agent_api_test.go
@@ -2,11 +2,14 @@ package component
import (
"context"
- "encoding/json"
"fmt"
- "io"
"net/http"
+ "strconv"
+ "strings"
"testing"
+ "time"
+
+ "github.com/go-resty/resty/v2"
"github.com/nginx/agent/sdk/v2/proto"
"github.com/nginx/agent/v2/src/core"
@@ -62,23 +65,32 @@ func TestGetNginxInstances(t *testing.T) {
agentAPI := plugins.NewAgentAPI(conf, mockEnvironment, mockNginxBinary)
agentAPI.Init(core.NewMockMessagePipe(context.TODO()))
- response, err := http.Get(fmt.Sprintf("http://localhost:%d/nginx/", port))
- assert.Nil(t, err)
+ client := resty.New()
+ client.SetRetryCount(3).SetRetryWaitTime(50 * time.Millisecond).SetRetryMaxWaitTime(200 * time.Millisecond)
- responseData, err := io.ReadAll(response.Body)
- assert.Nil(t, err)
+ url := fmt.Sprintf("http://localhost:%d/nginx", port)
+ response, err := client.R().EnableTrace().Get(url)
- var nginxDetailsResponse []*proto.NginxDetails
- err = json.Unmarshal(responseData, &nginxDetailsResponse)
- assert.Nil(t, err)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode())
- assert.Equal(t, http.StatusOK, response.StatusCode)
- assert.True(t, json.Valid(responseData))
if tt.nginxDetails == nil {
- assert.Equal(t, 0, len(nginxDetailsResponse))
+ assert.Equal(t, "[]", response.String())
} else {
- assert.Equal(t, 1, len(nginxDetailsResponse))
- assert.Equal(t, tt.nginxDetails, nginxDetailsResponse[0])
+ nginxDetails := tutils.ProcessApiNginxInstanceResponse(response)
+ for _, detail := range nginxDetails {
+ detail := strings.Split(detail, ":")
+ switch {
+ case strings.Contains(detail[0], "nginx_id"):
+ assert.Equal(t, "45d4sf5d4sf4e8s4f8es4564", detail[1])
+ case strings.Contains(detail[0], "version"):
+ assert.Equal(t, "21", detail[1])
+ case strings.Contains(detail[0], "conf_path"):
+ assert.Equal(t, "/etc/nginx/conf", detail[1])
+ case strings.Contains(detail[0], "start_time"):
+ assert.Equal(t, "1238043824", detail[1])
+ }
+ }
}
agentAPI.Close()
@@ -101,12 +113,17 @@ func TestInvalidPath(t *testing.T) {
agentAPI := plugins.NewAgentAPI(conf, mockEnvironment, mockNginxBinary)
agentAPI.Init(core.NewMockMessagePipe(context.TODO()))
- response, err := http.Get(fmt.Sprintf("http://localhost:%d/invalid/", port))
+ client := resty.New()
+ client.SetRetryCount(3).SetRetryWaitTime(50 * time.Millisecond).SetRetryMaxWaitTime(200 * time.Millisecond)
+
+ url := fmt.Sprintf("http://localhost:%d/invalid/", port)
+ response, err := client.R().EnableTrace().Get(url)
+
assert.Nil(t, err)
agentAPI.Close()
- assert.Equal(t, http.StatusNotFound, response.StatusCode)
+ assert.Equal(t, http.StatusNotFound, response.StatusCode())
}
func TestMetrics(t *testing.T) {
@@ -144,22 +161,40 @@ func TestMetrics(t *testing.T) {
Name: "system.cpu.idle",
Value: 12,
},
+ {
+ Name: "nginx.workers.count",
+ Value: 6,
+ },
},
},
},
}))
- response, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
- assert.Nil(t, err)
+ client := resty.New()
- assert.Equal(t, http.StatusOK, response.StatusCode)
+ url := fmt.Sprintf("http://localhost:%d/metrics", port)
+ client.SetRetryCount(3).SetRetryWaitTime(50 * time.Millisecond).SetRetryMaxWaitTime(200 * time.Millisecond)
- responseData, err := io.ReadAll(response.Body)
+ response, err := client.R().EnableTrace().Get(url)
assert.Nil(t, err)
-
+ assert.Equal(t, http.StatusOK, response.StatusCode())
+ assert.Contains(t, response.String(), "# HELP system_cpu_idle")
+ assert.Contains(t, response.String(), "# TYPE system_cpu_idle gauge")
agentAPI.Close()
- assert.Contains(t, string(responseData), "# HELP system_cpu_idle")
- assert.Contains(t, string(responseData), "# TYPE system_cpu_idle gauge")
- assert.Contains(t, string(responseData), "system_cpu_idle{hostname=\"example.com\",system_tags=\"\"} 12")
+ responseData := tutils.ProcessApiMetricResponse(response)
+
+ for _, m := range responseData {
+ metric := strings.Split(m, " ")
+ switch {
+ case strings.Contains(metric[0], "system_cpu_idle"):
+ value, _ := strconv.ParseFloat(metric[1], 64)
+ assert.Equal(t, float64(12), value)
+ case strings.Contains(metric[0], "nginx_workers_count"):
+ value, _ := strconv.ParseFloat(metric[1], 64)
+ assert.Equal(t, float64(6), value)
+ }
+
+ }
+
}
diff --git a/test/integration/api/api_test.go b/test/integration/api/api_test.go
index d22f7951e..8d3d4641a 100644
--- a/test/integration/api/api_test.go
+++ b/test/integration/api/api_test.go
@@ -11,6 +11,8 @@ import (
"time"
"github.com/go-resty/resty/v2"
+ tutils "github.com/nginx/agent/v2/test/utils"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/modules/compose"
@@ -55,23 +57,19 @@ func TestAPI_Nginx(t *testing.T) {
assert.Contains(t, resp.String(), "nginx_id")
assert.NotContains(t, resp.String(), "test_fail_nginx")
- nginxDetails := strings.Split(resp.String(), " ")
+ nginxDetails := tutils.ProcessApiNginxInstanceResponse(resp)
for _, detail := range nginxDetails {
detail := strings.Split(detail, ":")
-
switch {
case strings.Contains(detail[0], "nginx_id"):
assert.NotNil(t, detail[1])
-
case strings.Contains(detail[0], "version"):
assert.NotNil(t, detail[1])
-
case strings.Contains(detail[0], "runtime_modules"):
- assert.Equal(t, detail[1], "http_stub_status_module")
-
+ assert.Contains(t, detail[1], "http_ssl_module")
case strings.Contains(detail[0], "conf_path"):
- assert.Equal(t, detail[1], "/usr/local/nginx/conf/nginx.conf")
+ assert.Equal(t, "/etc/nginx/nginx.conf", detail[1])
}
}
@@ -95,7 +93,7 @@ func TestAPI_Metrics(t *testing.T) {
assert.Contains(t, resp.String(), "system_cpu_system")
assert.NotContains(t, resp.String(), "test_fail_metric")
- metrics := processResponse(resp)
+ metrics := tutils.ProcessApiMetricResponse(resp)
for _, m := range metrics {
metric := strings.Split(m, " ")
@@ -119,20 +117,3 @@ func TestAPI_Metrics(t *testing.T) {
}
}
}
-
-func processResponse(resp *resty.Response) []string {
- metrics := strings.Split(resp.String(), "\n")
-
- i := 0
-
- for _, metric := range metrics {
- if metric[0:1] != "#" {
- metrics[i] = metric
- i++
- }
- }
-
- metrics = metrics[:i]
-
- return metrics
-}
diff --git a/test/integration/go.mod b/test/integration/go.mod
index 422eaa24f..cc2f9a7b7 100644
--- a/test/integration/go.mod
+++ b/test/integration/go.mod
@@ -14,7 +14,6 @@ require (
)
require (
- cloud.google.com/go/compute/metadata v0.2.1 // indirect
github.com/AlecAivazis/survey/v2 v2.3.6 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
@@ -41,6 +40,7 @@ require (
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
+ github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -61,13 +61,15 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.13 // indirect
- github.com/kr/pretty v0.3.1 // indirect
+ github.com/klauspost/cpuid/v2 v2.1.0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
@@ -88,29 +90,41 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
+ github.com/nginx/agent/sdk/v2 v2.0.0-00010101000000-000000000000 // indirect
+ github.com/nginxinc/nginx-go-crossplane v0.4.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/orcaman/concurrent-map v1.0.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
+ github.com/shirou/gopsutil/v3 v3.22.7 // indirect
+ github.com/spf13/afero v1.9.2 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/cobra v1.6.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.14.0 // indirect
+ github.com/stretchr/objx v0.5.0 // indirect
+ github.com/subosito/gotenv v1.4.1 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/tonistiigi/fsutil v0.0.0-20220930225714-4638ad635be5 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
+ github.com/vardius/message-bus v1.1.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -142,6 +156,7 @@ require (
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.24.1 // indirect
@@ -169,3 +184,5 @@ replace (
k8s.io/client-go => k8s.io/client-go v0.22.4
)
+
+replace github.com/nginx/agent/sdk/v2 => ./../../sdk
diff --git a/test/integration/go.sum b/test/integration/go.sum
index 301be504e..b46a09298 100644
--- a/test/integration/go.sum
+++ b/test/integration/go.sum
@@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -13,6 +14,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -22,7 +26,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
-cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -34,6 +37,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw=
github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI=
@@ -96,6 +100,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cloudflare/cfssl v1.4.1 h1:vScfU2DrIUI9VPHBVeeAQ0q5A+9yshO1Gz+3QoUQiKw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -162,6 +167,7 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
@@ -172,9 +178,11 @@ github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -207,6 +215,7 @@ github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSM
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
@@ -263,9 +272,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -274,6 +286,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -281,6 +294,10 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -293,6 +310,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
@@ -314,10 +332,12 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
@@ -340,6 +360,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@@ -350,20 +371,25 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=
github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
+github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 h1:aczX6NMOtt6L4YT0fQvKkDK6LZEtdOso9sUH89V1+P0=
+github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281/go.mod h1:lc+czkgO/8F7puNki5jk8QyujbfK1LOT7Wl0ON2hxyk=
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
@@ -384,6 +410,7 @@ github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.5.0/go.mod h1:fJ0UAZc1fx3xZhU4eSHQDJ1ApFmTVhp5VTpV9tm2ogg=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -427,17 +454,24 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nginx/agent/v2 v2.22.0 h1:MRDg67kRKtaXqVmNO9yWcllj7oD1d6iuz3xAUyJVWC0=
github.com/nginx/agent/v2 v2.22.0/go.mod h1:qyc0p+kuv+sC9xJYd3S1JbxIej9v+E+X0rrh3x9V5hg=
+github.com/nginxinc/nginx-go-crossplane v0.4.1 h1:swWcI437atMpMT/l6GuEu0oRhkMBOhh0DGHCOd2QgOc=
+github.com/nginxinc/nginx-go-crossplane v0.4.1/go.mod h1:NH9Gmsd1gxoLFJHZPxL9I4Z3qeA2n1BXCKiN2TtW2bc=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -458,14 +492,18 @@ github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CF
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
+github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -506,11 +544,14 @@ github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZV
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil/v3 v3.22.7 h1:flKnuCMfUUrO+oAvwAd6GKZgnPzr098VA/UJ14nhJd4=
+github.com/shirou/gopsutil/v3 v3.22.7/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -522,13 +563,16 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -541,6 +585,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -553,6 +598,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/testcontainers/testcontainers-go v0.17.0 h1:UdKSw2DJXinlS6ijbFb4VHpQzD+EfTwcTq1/19a+8PU=
github.com/testcontainers/testcontainers-go v0.17.0/go.mod h1:n5trpHrB68IUelEqGNC8VipaCo6jOGusU44kIK11XRs=
@@ -572,6 +618,8 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f h1:DLpt6B5oaaS8jyXHa9VA4rrZloBVPVXeCtrOsrFauxc=
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vardius/message-bus v1.1.5 h1:YSAC2WB4HRlwc4neFPTmT88kzzoiQ+9WRRbej/E/LZc=
+github.com/vardius/message-bus v1.1.5/go.mod h1:6xladCV2lMkUAE4bzzS85qKOiB5miV7aBVRafiTJGqw=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/weppos/publicsuffix-go v0.20.0 h1:59ypvSUbW3Dunc6zVm+v+MmXf2Q6cGiNDkxgRIzEnaA=
@@ -587,6 +635,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zmap/zcrypto v0.0.0-20220605182715-4dfcec6e9a8c h1:ufDm/IlBYZYLuiqvQuhpTKwrcAS2OlXEzWbDvTVGbSQ=
@@ -596,6 +645,7 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
@@ -643,6 +693,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -667,6 +720,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -675,7 +729,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -710,10 +767,15 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -724,6 +786,10 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
@@ -783,9 +849,16 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -797,15 +870,20 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -820,6 +898,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
@@ -871,9 +950,18 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -896,6 +984,9 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -935,7 +1026,14 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
@@ -951,7 +1049,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
@@ -981,12 +1083,14 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/.editorconfig b/test/integration/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 000000000..fad895851
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/.gitattributes b/test/integration/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 000000000..32f1001be
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/.gitignore b/test/integration/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 000000000..1d89d85ce
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# go test -c output
+*.test
+*.test.exe
+
+# Output of go build ./cmd/fsnotify
+/fsnotify
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/.mailmap b/test/integration/vendor/github.com/fsnotify/fsnotify/.mailmap
new file mode 100644
index 000000000..a04f2907f
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/.mailmap
@@ -0,0 +1,2 @@
+Chris Howey
+Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/test/integration/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 000000000..77f9593bd
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,470 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+Nothing yet.
+
+## [1.6.0] - 2022-10-13
+
+This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
+but not documented). It also increases the minimum Linux version to 2.6.32.
+
+### Additions
+
+- all: add `Event.Has()` and `Op.Has()` ([#477])
+
+ This makes checking events a lot easier; for example:
+
+ if event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ }
+
+ Becomes:
+
+ if event.Has(Write) && !event.Has(Remove) {
+ }
+
+- all: add cmd/fsnotify ([#463])
+
+ A command-line utility for testing and some examples.
+
+### Changes and fixes
+
+- inotify: don't ignore events for files that don't exist ([#260], [#470])
+
+ Previously the inotify watcher would call `os.Lstat()` to check if a file
+ still exists before emitting events.
+
+ This was inconsistent with other platforms and resulted in inconsistent event
+ reporting (e.g. when a file is quickly removed and re-created), and generally
+ a source of confusion. It was added in 2013 to fix a memory leak that no
+ longer exists.
+
+- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
+ not watched ([#460])
+
+- inotify: replace epoll() with non-blocking inotify ([#434])
+
+ Non-blocking inotify was not generally available at the time this library was
+ written in 2014, but now it is. As a result, the minimum Linux version is
+ bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
+
+- kqueue: don't check for events every 100ms ([#480])
+
+ The watcher would wake up every 100ms, even when there was nothing to do. Now
+ it waits until there is something to do.
+
+- macos: retry opening files on EINTR ([#475])
+
+- kqueue: skip unreadable files ([#479])
+
+ kqueue requires a file descriptor for every file in a directory; this would
+ fail if a file was unreadable by the current user. Now these files are simply
+ skipped.
+
+- windows: fix renaming a watched directory if the parent is also watched ([#370])
+
+- windows: increase buffer size from 4K to 64K ([#485])
+
+- windows: close file handle on Remove() ([#288])
+
+- kqueue: put pathname in the error if watching a file fails ([#471])
+
+- inotify, windows: calling Close() more than once could race ([#465])
+
+- kqueue: improve Close() performance ([#233])
+
+- all: various documentation additions and clarifications.
+
+[#233]: https://github.com/fsnotify/fsnotify/pull/233
+[#260]: https://github.com/fsnotify/fsnotify/pull/260
+[#288]: https://github.com/fsnotify/fsnotify/pull/288
+[#370]: https://github.com/fsnotify/fsnotify/pull/370
+[#434]: https://github.com/fsnotify/fsnotify/pull/434
+[#460]: https://github.com/fsnotify/fsnotify/pull/460
+[#463]: https://github.com/fsnotify/fsnotify/pull/463
+[#465]: https://github.com/fsnotify/fsnotify/pull/465
+[#470]: https://github.com/fsnotify/fsnotify/pull/470
+[#471]: https://github.com/fsnotify/fsnotify/pull/471
+[#475]: https://github.com/fsnotify/fsnotify/pull/475
+[#477]: https://github.com/fsnotify/fsnotify/pull/477
+[#479]: https://github.com/fsnotify/fsnotify/pull/479
+[#480]: https://github.com/fsnotify/fsnotify/pull/480
+[#485]: https://github.com/fsnotify/fsnotify/pull/485
+
+## [1.5.4] - 2022-04-25
+
+* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
+* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
+* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
+
+## [1.5.3] - 2022-04-22
+
+* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
+
+## [1.5.2] - 2022-04-21
+
+* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
+* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
+* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
+* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
+* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
+
+## [1.5.1] - 2021-08-24
+
+* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
+
+## [1.5.0] - 2021-08-20
+
+* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
+* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
+* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
+* CI: Use GitHub Actions for CI and cover go 1.12-1.17
+ [#378](https://github.com/fsnotify/fsnotify/pull/378)
+ [#381](https://github.com/fsnotify/fsnotify/pull/381)
+ [#385](https://github.com/fsnotify/fsnotify/pull/385)
+* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
+
+## [1.4.9] - 2020-03-11
+
+* Move example usage to the readme #329. This may resolve #328.
+
+## [1.4.8] - 2020-03-10
+
+* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
+* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
+* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
+* CI: Less verbosity (@nathany #267)
+* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
+* Tests: Check if channels are closed in the example (@alexeykazakov #244)
+* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
+* CI: Add windows to travis matrix (@cpuguy83 #284)
+* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
+* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
+* Linux: open files with close-on-exec (@linxiulei #273)
+* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
+* Project: Add go.mod (@nathany #309)
+* Project: Revise editor config (@nathany #309)
+* Project: Update copyright for 2019 (@nathany #309)
+* CI: Drop go1.8 from CI matrix (@nathany #309)
+* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
+
+## [1.4.7] - 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## [1.4.2] - 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## [1.4.1] - 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## [1.4.0] - 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## [1.3.1] - 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## [1.3.0] - 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## [1.2.10] - 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## [1.2.9] - 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## [1.2.8] - 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## [1.2.5] - 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## [1.2.1] - 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## [1.2.0] - 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## [1.1.1] - 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## [1.1.0] - 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [1.0.4] - 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## [1.0.3] - 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## [1.0.2] - 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## [1.0.0] - 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## [0.9.3] - 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [0.9.2] - 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## [0.9.1] - 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## [0.9.0] - 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## [0.8.12] - 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## [0.8.11] - 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## [0.8.10] - 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## [0.8.9] - 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## [0.8.8] - 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## [0.8.7] - 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## [0.8.6] - 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## [0.8.5] - 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## [0.8.4] - 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## [0.8.3] - 2013-03-13
+
+* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## [0.8.2] - 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## [0.8.1] - 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## [0.8.0] - 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## [0.7.4] - 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## [0.7.3] - 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## [0.7.2] - 2012-09-01
+
+* kqueue: events for created directories
+
+## [0.7.1] - 2012-07-14
+
+* [Fix] for renaming files
+
+## [0.7.0] - 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## [0.6.0] - 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## [0.5.1] - 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## [0.5.0] - 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## [0.4.0] - 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## [0.3.0] - 2012-02-19
+
+* kqueue: add files when watch directory
+
+## [0.2.0] - 2011-12-30
+
+* update to latest Go weekly code
+
+## [0.1.0] - 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/test/integration/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 000000000..ea379759d
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+Thank you for your interest in contributing to fsnotify! We try to review and
+merge PRs in a reasonable timeframe, but please be aware that:
+
+- To avoid "wasted" work, please discus changes on the issue tracker first. You
+ can just send PRs, but they may end up being rejected for one reason or the
+ other.
+
+- fsnotify is a cross-platform library, and changes must work reasonably well on
+ all supported platforms.
+
+- Changes will need to be compatible; old code should still compile, and the
+ runtime behaviour can't change in ways that are likely to lead to problems for
+ users.
+
+Testing
+-------
+Just `go test ./...` runs all the tests; the CI runs this on all supported
+platforms. Testing different platforms locally can be done with something like
+[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
+
+Use the `-short` flag to make the "stress test" run faster.
+
+
+[goon]: https://github.com/arp242/goon
+[Vagrant]: https://www.vagrantup.com/
+[integration_test.go]: /integration_test.go
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/LICENSE b/test/integration/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 000000000..fb03ade75
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,25 @@
+Copyright © 2012 The Go Authors. All rights reserved.
+Copyright © fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+* Neither the name of Google Inc. nor the names of its contributors may be used
+ to endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/README.md b/test/integration/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 000000000..d4e6080fe
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,161 @@
+fsnotify is a Go library to provide cross-platform filesystem notifications on
+Windows, Linux, macOS, and BSD systems.
+
+Go 1.16 or newer is required; the full documentation is at
+https://pkg.go.dev/github.com/fsnotify/fsnotify
+
+**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
+released version, whereas this README is for the last development version which
+may include additions/changes.**
+
+---
+
+Platform support:
+
+| Adapter | OS | Status |
+| --------------------- | ---------------| -------------------------------------------------------------|
+| inotify | Linux 2.6.32+ | Supported |
+| kqueue | BSD, macOS | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
+| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
+| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
+| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
+
+Linux and macOS should include Android and iOS, but these are currently untested.
+
+Usage
+-----
+A basic example:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func main() {
+ // Create new watcher.
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ // Start listening for events.
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Has(fsnotify.Write) {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ // Add a path.
+ err = watcher.Add("/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Block main goroutine forever.
+ <-make(chan struct{})
+}
+```
+
+Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
+run with:
+
+ % go run ./cmd/fsnotify
+
+FAQ
+---
+### Will a file still be watched when it's moved to another directory?
+No, not unless you are watching the location it was moved to.
+
+### Are subdirectories watched too?
+No, you must add watches for any directory you want to watch (a recursive
+watcher is on the roadmap: [#18]).
+
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+
+### Do I have to watch the Error and Event channels in a goroutine?
+As of now, yes (you can read both channels in the same goroutine using `select`,
+you don't need a separate goroutine for both channels; see the example).
+
+### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
+fsnotify requires support from underlying OS to work. The current NFS and SMB
+protocols does not provide network level support for file notifications, and
+neither do the /proc and /sys virtual filesystems.
+
+This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
+
+[#9]: https://github.com/fsnotify/fsnotify/issues/9
+
+Platform-specific notes
+-----------------------
+### Linux
+When a file is removed a REMOVE event won't be emitted until all file
+descriptors are closed; it will emit a CHMOD instead:
+
+ fp := os.Open("file")
+ os.Remove("file") // CHMOD
+ fp.Close() // REMOVE
+
+This is the event that inotify sends, so not much can be changed about this.
+
+The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
+the number of watches per user, and `fs.inotify.max_user_instances` specifies
+the maximum number of inotify instances per user. Every Watcher you create is an
+"instance", and every path you add is a "watch".
+
+These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
+`/proc/sys/fs/inotify/max_user_instances`
+
+To increase them you can use `sysctl` or write the value to proc file:
+
+ # The default values on Linux 5.18
+ sysctl fs.inotify.max_user_watches=124983
+ sysctl fs.inotify.max_user_instances=128
+
+To make the changes persist on reboot edit `/etc/sysctl.conf` or
+`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
+distro's documentation):
+
+ fs.inotify.max_user_watches=124983
+ fs.inotify.max_user_instances=128
+
+Reaching the limit will result in a "no space left on device" or "too many open
+files" error.
+
+### kqueue (macOS, all BSD systems)
+kqueue requires opening a file descriptor for every file that's being watched;
+so if you're watching a directory with five files then that's six file
+descriptors. You will run in to your system's "max open files" limit faster on
+these platforms.
+
+The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
+control the maximum number of open files.
+
+### macOS
+Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
+workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
+have a native FSEvents implementation (see [#11]).
+
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_fen.go
new file mode 100644
index 000000000..1a95ad8e7
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -0,0 +1,162 @@
+//go:build solaris
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # macOS notes
+//
+// Spotlight indexing on macOS can result in multiple events (see [#15]). A
+// temporary workaround is to add your folder(s) to the "Spotlight Privacy
+// Settings" until we have a native FSEvents implementation (see [#11]).
+//
+// [#11]: https://github.com/fsnotify/fsnotify/issues/11
+// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, so you
+ // probably want to wait until you've stopped receiving
+ // them (see the dedup example in cmd/fsnotify).
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // and on kqueue when a file is truncated. On Windows
+ // it's never sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
new file mode 100644
index 000000000..54c77fbb0
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -0,0 +1,459 @@
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # macOS notes
+//
+// Spotlight indexing on macOS can result in multiple events (see [#15]). A
+// temporary workaround is to add your folder(s) to the "Spotlight Privacy
+// Settings" until we have a native FSEvents implementation (see [#11]).
+//
+// [#11]: https://github.com/fsnotify/fsnotify/issues/11
+// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, so you
+ // probably want to wait until you've stopped receiving
+ // them (see the dedup example in cmd/fsnotify).
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // and on kqueue when a file is truncated. On Windows
+ // it's never sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+
+ // Store fd here as os.File.Read() will no longer return on close after
+ // calling Fd(). See: https://github.com/golang/go/issues/26439
+ fd int
+ mu sync.Mutex // Map access
+ inotifyFile *os.File
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ // Need to set the FD to nonblocking mode in order for SetDeadline methods to work
+ // Otherwise, blocking i/o operations won't terminate on close
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
+ if fd == -1 {
+ return nil, errno
+ }
+
+ w := &Watcher{
+ fd: fd,
+ inotifyFile: os.NewFile(uintptr(fd), ""),
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Returns true if the event was sent, or false if watcher is closed.
+func (w *Watcher) sendEvent(e Event) bool {
+ select {
+ case w.Events <- e:
+ return true
+ case <-w.done:
+ }
+ return false
+}
+
+// Returns true if the error was sent, or false if watcher is closed.
+func (w *Watcher) sendError(err error) bool {
+ select {
+ case w.Errors <- err:
+ return true
+ case <-w.done:
+ return false
+ }
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed() {
+ w.mu.Unlock()
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+ w.mu.Unlock()
+
+ // Causes any blocking reads to return with an error, provided the file
+ // still supports deadline operations.
+ err := w.inotifyFile.Close()
+ if err != nil {
+ return err
+ }
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+ }
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+ // error, we need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+ // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+ // so that EINVAL means that the wd is being rm_watch()ed or its file removed
+ // by another thread and we have not received IN_IGNORE event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case;
+ // The only two possible errors are:
+ //
+ // - EBADF, which happens when w.fd is not a valid file descriptor
+ // of any kind.
+ // - EINVAL, which is when fd is not an inotify descriptor or wd
+ // is not a valid watch descriptor. Watch descriptors are
+ // invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they
+ // are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+// WatchList returns all paths added with [Add] (and are not yet removed).
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ defer func() {
+ close(w.doneResp)
+ close(w.Errors)
+ close(w.Events)
+ }()
+
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ errno error // Syscall errno
+ )
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ n, err := w.inotifyFile.Read(buf[:])
+ switch {
+ case errors.Unwrap(err) == os.ErrClosed:
+ return
+ case err != nil:
+ if !w.sendError(err) {
+ return
+ }
+ continue
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // If EOF is received. This should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ if !w.sendError(err) {
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ var (
+ // Point "raw" to the event in the buffer
+ raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+ mask = uint32(raw.Mask)
+ nameLen = uint32(raw.Len)
+ )
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ if !w.sendError(ErrEventOverflow) {
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := w.newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if mask&unix.IN_IGNORED == 0 {
+ if !w.sendEvent(event) {
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func (w *Watcher) newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
new file mode 100644
index 000000000..29087469b
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -0,0 +1,707 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # macOS notes
+//
+// Spotlight indexing on macOS can result in multiple events (see [#15]). A
+// temporary workaround is to add your folder(s) to the "Spotlight Privacy
+// Settings" until we have a native FSEvents implementation (see [#11]).
+//
+// [#11]: https://github.com/fsnotify/fsnotify/issues/11
+// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, so you
+ // probably want to wait until you've stopped receiving
+ // them (see the dedup example in cmd/fsnotify).
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // and on kqueue when a file is truncated. On Windows
+ // it's never sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+
+ done chan struct{}
+ kq int // File descriptor (as returned by the kqueue() syscall).
+ closepipe [2]int // Pipe used for closing.
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Watched file descriptors (key: path).
+ watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
+ userWatches map[string]struct{} // Watches added with Watcher.Add()
+ dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
+ fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ kq, closepipe, err := newKqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ closepipe: closepipe,
+ watches: make(map[string]int),
+ watchesByDir: make(map[string]map[int]struct{}),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]struct{}),
+ userWatches: make(map[string]struct{}),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// newKqueue creates a new kernel event queue and returns a descriptor.
+//
+// This registers a new event on closepipe, which will trigger an event when
+// it's closed. This way we can use kevent() without timeout/polling; without
+// the closepipe, it would block forever and we wouldn't be able to stop it at
+// all.
+func newKqueue() (kq int, closepipe [2]int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, closepipe, err
+ }
+
+ // Register the close pipe.
+ err = unix.Pipe(closepipe[:])
+ if err != nil {
+ unix.Close(kq)
+ return kq, closepipe, err
+ }
+
+ // Register changes to listen on the closepipe.
+ changes := make([]unix.Kevent_t, 1)
+ // SetKevent converts int to the platform-specific types.
+ unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
+ unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
+
+ ok, err := unix.Kevent(kq, changes, nil, nil)
+ if ok == -1 {
+ unix.Close(kq)
+ unix.Close(closepipe[0])
+ unix.Close(closepipe[1])
+ return kq, closepipe, err
+ }
+ return kq, closepipe, nil
+}
+
+// Returns true if the event was sent, or false if watcher is closed.
+func (w *Watcher) sendEvent(e Event) bool {
+ select {
+ case w.Events <- e:
+ return true
+ case <-w.done:
+ }
+ return false
+}
+
+// Returns true if the error was sent, or false if watcher is closed.
+func (w *Watcher) sendError(err error) bool {
+ select {
+ case w.Errors <- err:
+ return true
+ case <-w.done:
+ }
+ return false
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ pathsToRemove := make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock() // Unlock before calling Remove, which also locks
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // Send "quit" message to the reader goroutine.
+ unix.Close(w.closepipe[1])
+ close(w.done)
+
+ return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.userWatches[name] = struct{}{}
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+ }
+
+ err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
+ if err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.userWatches, name)
+
+ parentName := filepath.Dir(name)
+ delete(w.watchesByDir[parentName], watchfd)
+
+ if len(w.watchesByDir[parentName]) == 0 {
+ delete(w.watchesByDir, parentName)
+ }
+
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ delete(w.fileExists, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for fd := range w.watchesByDir[name] {
+ path := w.paths[fd]
+ if _, ok := w.userWatches[path.name]; !ok {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// WatchList returns all paths added with [Add] (and are not yet removed).
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.userWatches))
+ for pathname := range w.userWatches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets or named pipes
+ if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ //
+ // Linux can add unresolvable symlinks to the watch list without issue,
+ // and Windows can't do symlinks period. To maintain consistency, we
+ // will act like everything is fine if the link can't be resolved.
+ // There will simply be no file events for broken symlinks. Hence the
+ // returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ // Retry on EINTR; open() can return EINTR in practice on macOS.
+ // See #354, and go issues 11180 and 39237.
+ for {
+ watchfd, err = unix.Open(name, openMode, 0)
+ if err == nil {
+ break
+ }
+ if errors.Is(err, unix.EINTR) {
+ continue
+ }
+
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
+ if err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ parentName := filepath.Dir(name)
+ w.watches[name] = watchfd
+
+ watchesByDir, ok := w.watchesByDir[parentName]
+ if !ok {
+ watchesByDir = make(map[int]struct{}, 1)
+ w.watchesByDir[parentName] = watchesByDir
+ }
+ watchesByDir[watchfd] = struct{}{}
+
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ defer func() {
+ err := unix.Close(w.kq)
+ if err != nil {
+ w.Errors <- err
+ }
+ unix.Close(w.closepipe[0])
+ close(w.Events)
+ close(w.Errors)
+ }()
+
+ eventBuffer := make([]unix.Kevent_t, 10)
+ for closed := false; !closed; {
+ kevents, err := w.read(eventBuffer)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
+ closed = true
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for _, kevent := range kevents {
+ var (
+ watchfd = int(kevent.Ident)
+ mask = uint32(kevent.Fflags)
+ )
+
+ // Shut down the loop when the pipe is closed, but only after all
+ // other events have been processed.
+ if watchfd == w.closepipe[0] {
+ closed = true
+ continue
+ }
+
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+
+ event := w.newEvent(path.name, mask)
+
+ if path.isDir && !event.Has(Remove) {
+ // Double check to make sure the directory exists. This can
+ // happen when we do a rm -fr on a recursively watched folders
+ // and we receive a modification event first but the folder has
+ // been deleted and later receive the delete event.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ event.Op |= Remove
+ }
+ }
+
+ if event.Has(Rename) || event.Has(Remove) {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Has(Write) && !event.Has(Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ if !w.sendEvent(event) {
+ closed = true
+ continue
+ }
+ }
+
+ if event.Has(Remove) {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func (w *Watcher) newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ path := filepath.Join(dirPath, fileInfo.Name())
+
+ cleanPath, err := w.internalWatch(path, fileInfo)
+ if err != nil {
+ // No permission to read the file; that's not a problem: just skip.
+ // But do add it to w.fileExists to prevent it from being picked up
+ // as a "new" file later (it still shows up in the directory
+ // listing).
+ switch {
+ case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
+ cleanPath = filepath.Clean(path)
+ default:
+ return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
+ }
+ }
+
+ w.mu.Lock()
+ w.fileExists[cleanPath] = struct{}{}
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// Search the directory for new files and send an event for them.
+//
+// This functionality is to have the BSD watcher match the inotify, which sends
+// a create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dir string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fi := range files {
+ err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ if !w.sendEvent(Event{Name: filePath, Op: Create}) {
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = struct{}{}
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// Register events with the queue.
+func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types.
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // Register the events.
+ success, err := unix.Kevent(w.kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(w.kq, nil, events, nil)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/backend_other.go b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_other.go
new file mode 100644
index 000000000..a9bb1c3c4
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -0,0 +1,66 @@
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_windows.go
new file mode 100644
index 000000000..ae392867c
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -0,0 +1,746 @@
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # macOS notes
+//
+// Spotlight indexing on macOS can result in multiple events (see [#15]). A
+// temporary workaround is to add your folder(s) to the "Spotlight Privacy
+// Settings" until we have a native FSEvents implementation (see [#11]).
+//
+// [#11]: https://github.com/fsnotify/fsnotify/issues/11
+// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, so you
+ // probably want to wait until you've stopped receiving
+ // them (see the dedup example in cmd/fsnotify).
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // and on kqueue when a file is truncated. On Windows
+ // it's never sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+
+ port windows.Handle // Handle to completion port
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+
+ mu sync.Mutex // Protects access to watches, isClosed
+ watches watchMap // Map of watches (key: i-number)
+ isClosed bool // Set to true when Close() is first called
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
+ if err != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", err)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+
+ event := w.newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+// Returns true if the error was sent, or false if watcher is closed.
+func (w *Watcher) sendError(err error) bool {
+ select {
+ case w.Errors <- err:
+ return true
+ case <-w.quit:
+ }
+ return false
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+ w.mu.Unlock()
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return errors.New("watcher already closed")
+ }
+ w.mu.Unlock()
+
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// WatchList returns all paths added with [Add] (and are not yet removed).
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for _, entry := range w.watches {
+ for _, watchEntry := range entry {
+ entries = append(entries, watchEntry.path)
+ }
+ }
+
+ return entries
+}
+
+// These options are from the old golang.org/x/exp/winfsnotify, where you could
+// add various options to the watch. This has long since been removed.
+//
+// The "sys" in the name is misleading as they're not part of any "system".
+//
+// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
+const (
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+ sysFSIGNORED = 0x8000
+)
+
+func (w *Watcher) newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle windows.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov windows.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [65536]byte // 64K buffer
+}
+
+type (
+ indexMap map[uint64]*watch
+ watchMap map[uint32]indexMap
+)
+
+func (w *Watcher) wakeupReader() error {
+ err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if err != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", err)
+ }
+ return nil
+}
+
+func (w *Watcher) getDir(pathname string) (dir string, err error) {
+ attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
+ if err != nil {
+ return "", os.NewSyscallError("GetFileAttributes", err)
+ }
+ if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func (w *Watcher) getIno(path string) (ino *inode, err error) {
+ h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
+ windows.FILE_LIST_DIRECTORY,
+ windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
+ nil, windows.OPEN_EXISTING,
+ windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
+ if err != nil {
+ return nil, os.NewSyscallError("CreateFile", err)
+ }
+
+ var fi windows.ByHandleFileInformation
+ err = windows.GetFileInformationByHandle(h, &fi)
+ if err != nil {
+ windows.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", err)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := w.getDir(pathname)
+ if err != nil {
+ return err
+ }
+
+ ino, err := w.getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
+ if err != nil {
+ windows.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", err)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ windows.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+
+ err = w.startRead(watchEntry)
+ if err != nil {
+ return err
+ }
+
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := w.getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := w.getIno(dir)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+
+ err = windows.CloseHandle(ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CloseHandle", err))
+ }
+ if watch == nil {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ err := windows.CancelIo(watch.ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CancelIo", err))
+ w.deleteWatch(watch)
+ }
+ mask := w.toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= w.toWindowsFlags(m)
+ }
+ if mask == 0 {
+ err := windows.CloseHandle(watch.ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CloseHandle", err))
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+
+ rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if rdErr != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
+ if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n uint32
+ key uintptr
+ ov *windows.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
+ // This error is handled after the watch == nil check below. NOTE: this
+	// seems odd, not sure if it's correct.
+
+ watch := (*watch)(unsafe.Pointer(ov))
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+
+ err := windows.CloseHandle(w.port)
+ if err != nil {
+ err = os.NewSyscallError("CloseHandle", err)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch qErr {
+ case windows.ERROR_MORE_DATA:
+ if watch == nil {
+ w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case windows.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case windows.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.sendError(errors.New("short read in readEvents()"))
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+
+ // Create a buf that is the size of the path name
+ size := int(raw.FileNameLength / 2)
+ var buf []uint16
+ // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+ sh.Len = size
+ sh.Cap = size
+ name := windows.UTF16ToString(buf)
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case windows.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case windows.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case windows.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case windows.FILE_ACTION_RENAMED_NEW_NAME:
+ // Update saved path of all sub-watches.
+ old := filepath.Join(watch.path, watch.rename)
+ w.mu.Lock()
+ for _, watchMap := range w.watches {
+ for _, ww := range watchMap {
+ if strings.HasPrefix(ww.path, old) {
+ ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
+ }
+ }
+ }
+ w.mu.Unlock()
+
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ w.sendEvent(fullname, watch.names[name]&mask)
+ }
+ if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == windows.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+
+ w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
+ if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.sendError(errors.New(
+ "Windows system assumed buffer larger than it is, events have likely been missed."))
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.sendError(err)
+ }
+ }
+}
+
+func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSMODIFY != 0 {
+ m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case windows.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case windows.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case windows.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case windows.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case windows.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/test/integration/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..30a5bf0f0
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,81 @@
+//go:build !plan9
+// +build !plan9
+
+// Package fsnotify provides a cross-platform interface for file system
+// notifications.
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Event represents a file system notification.
+type Event struct {
+ // Path to the file or directory.
+ //
+ // Paths are relative to the input; for example with Add("dir") the Name
+ // will be set to "dir/file" if you create that file, but if you use
+ // Add("/path/to/dir") it will be "/path/to/dir/file".
+ Name string
+
+ // File operation that triggered the event.
+ //
+ // This is a bitmask and some systems may send multiple operations at once.
+ // Use the Event.Has() method instead of comparing with ==.
+ Op Op
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// The operations fsnotify can trigger; see the documentation on [Watcher] for a
+// full description, and check them with [Event.Has].
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+// Common errors that can be reported by a watcher
+var (
+ ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
+ ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
+
+func (op Op) String() string {
+ var b strings.Builder
+ if op.Has(Create) {
+ b.WriteString("|CREATE")
+ }
+ if op.Has(Remove) {
+ b.WriteString("|REMOVE")
+ }
+ if op.Has(Write) {
+ b.WriteString("|WRITE")
+ }
+ if op.Has(Rename) {
+ b.WriteString("|RENAME")
+ }
+ if op.Has(Chmod) {
+ b.WriteString("|CHMOD")
+ }
+ if b.Len() == 0 {
+ return "[no events]"
+ }
+ return b.String()[1:]
+}
+
+// Has reports if this operation has the given operation.
+func (o Op) Has(h Op) bool { return o&h == h }
+
+// Has reports if this event has the given operation.
+func (e Event) Has(op Op) bool { return e.Op.Has(op) }
+
+// String returns a string representation of the event with their path.
+func (e Event) String() string {
+ return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
+}
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/test/integration/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
new file mode 100644
index 000000000..b09ef7683
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
@@ -0,0 +1,208 @@
+#!/usr/bin/env zsh
+[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
+setopt err_exit no_unset pipefail extended_glob
+
+# Simple script to update the godoc comments on all watchers. Probably took me
+# more time to write this than doing it manually, but ah well 🙃
+
+watcher=$(</tmp/x
+ print -r -- $cmt >>/tmp/x
+ tail -n+$(( end + 1 )) $file >>/tmp/x
+ mv /tmp/x $file
+ done
+}
+
+set-cmt '^type Watcher struct ' $watcher
+set-cmt '^func NewWatcher(' $new
+set-cmt '^func (w \*Watcher) Add(' $add
+set-cmt '^func (w \*Watcher) Remove(' $remove
+set-cmt '^func (w \*Watcher) Close(' $close
+set-cmt '^func (w \*Watcher) WatchList(' $watchlist
+set-cmt '^[[:space:]]*Events *chan Event$' $events
+set-cmt '^[[:space:]]*Errors *chan error$' $errors
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/test/integration/vendor/github.com/fsnotify/fsnotify/system_bsd.go
new file mode 100644
index 000000000..4322b0b88
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/system_bsd.go
@@ -0,0 +1,8 @@
+//go:build freebsd || openbsd || netbsd || dragonfly
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/test/integration/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/test/integration/vendor/github.com/fsnotify/fsnotify/system_darwin.go
new file mode 100644
index 000000000..5da5ffa78
--- /dev/null
+++ b/test/integration/vendor/github.com/fsnotify/fsnotify/system_darwin.go
@@ -0,0 +1,9 @@
+//go:build darwin
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go
new file mode 100644
index 000000000..ad35f09a8
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_retry
+
+import (
+ "time"
+
+ "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils"
+)
+
+// BackoffLinear is very simple: it waits for a fixed period of time between calls.
+func BackoffLinear(waitBetween time.Duration) BackoffFunc {
+ return func(attempt uint) time.Duration {
+ return waitBetween
+ }
+}
+
+// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc {
+ return func(attempt uint) time.Duration {
+ return backoffutils.JitterUp(waitBetween, jitterFraction)
+ }
+}
+
+// BackoffExponential produces increasing intervals for each attempt.
+//
+// The scalar is multiplied times 2 raised to the current attempt. So the first
+// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s.
+func BackoffExponential(scalar time.Duration) BackoffFunc {
+ return func(attempt uint) time.Duration {
+ return scalar * time.Duration(backoffutils.ExponentBase2(attempt))
+ }
+}
+
+// BackoffExponentialWithJitter creates an exponential backoff like
+// BackoffExponential does, but adds jitter.
+func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc {
+ return func(attempt uint) time.Duration {
+ return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction)
+ }
+}
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
new file mode 100644
index 000000000..afd924a14
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
@@ -0,0 +1,25 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+`grpc_retry` provides client-side request retry logic for gRPC.
+
+Client-Side Request Retry Interceptor
+
+It allows for automatic retry, inside the generated gRPC code, of requests based on the gRPC status
+of the reply. It supports unary (1:1), and server stream (1:n) requests.
+
+By default the interceptors *are disabled*, preventing accidental use of retries. You can easily
+override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.:
+
+ myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5))
+
+Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms
+linear backoff with 10% jitter.
+
+For chained interceptors, the retry interceptor will call every interceptor that follows it
+whenever a retry happens.
+
+Please see examples for more advanced use.
+*/
+package grpc_retry
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go
new file mode 100644
index 000000000..7a633e293
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go
@@ -0,0 +1,142 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_retry
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+var (
+ // DefaultRetriableCodes is a set of well known types gRPC codes that should be retri-able.
+ //
+ // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, have been reached.
+ // `Unavailable` means that system is currently unavailable and the client should retry again.
+ DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable}
+
+ defaultOptions = &options{
+ max: 0, // disabled
+ perCallTimeout: 0, // disabled
+ includeHeader: true,
+ codes: DefaultRetriableCodes,
+ backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration {
+ return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt)
+ }),
+ }
+)
+
+// BackoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return a time the system client should
+// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type BackoffFunc func(attempt uint) time.Duration
+
+// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return a time the system client should
+// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration. The context can be used to extract request scoped metadata and context values.
+type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration
+
+// Disable disables the retry behaviour on this call, or this interceptor.
+//
+// It is semantically the same as `WithMax(0)`.
+func Disable() CallOption {
+ return WithMax(0)
+}
+
+// WithMax sets the maximum number of retries on this call, or this interceptor.
+func WithMax(maxRetries uint) CallOption {
+ return CallOption{applyFunc: func(o *options) {
+ o.max = maxRetries
+ }}
+}
+
+// WithBackoff sets the `BackoffFunc` used to control time between retries.
+func WithBackoff(bf BackoffFunc) CallOption {
+ return CallOption{applyFunc: func(o *options) {
+ o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration {
+ return bf(attempt)
+ })
+ }}
+}
+
+// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries.
+func WithBackoffContext(bf BackoffFuncContext) CallOption {
+ return CallOption{applyFunc: func(o *options) {
+ o.backoffFunc = bf
+ }}
+}
+
+// WithCodes sets which codes should be retried.
+//
+// Please *use with care*, as you may be retrying non-idempotent calls.
+//
+// You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these.
+func WithCodes(retryCodes ...codes.Code) CallOption {
+ return CallOption{applyFunc: func(o *options) {
+ o.codes = retryCodes
+ }}
+}
+
+// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor.
+//
+// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation
+// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call.
+//
+// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Seconds), each
+// of the retry calls (including the initial one) will have a deadline of now + 3s.
+//
+// A value of 0 disables the timeout overrides completely and returns to each retry call using the
+// parent `context.Deadline`.
+//
+// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried.
+func WithPerRetryTimeout(timeout time.Duration) CallOption {
+ return CallOption{applyFunc: func(o *options) {
+ o.perCallTimeout = timeout
+ }}
+}
+
+type options struct {
+ max uint
+ perCallTimeout time.Duration
+ includeHeader bool
+ codes []codes.Code
+ backoffFunc BackoffFuncContext
+}
+
+// CallOption is a grpc.CallOption that is local to grpc_retry.
+type CallOption struct {
+ grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
+ applyFunc func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options {
+ if len(callOptions) == 0 {
+ return opt
+ }
+ optCopy := &options{}
+ *optCopy = *opt
+ for _, f := range callOptions {
+ f.applyFunc(optCopy)
+ }
+ return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) {
+ for _, opt := range callOptions {
+ if co, ok := opt.(CallOption); ok {
+ retryOptions = append(retryOptions, co)
+ } else {
+ grpcOptions = append(grpcOptions, opt)
+ }
+ }
+ return grpcOptions, retryOptions
+}
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
new file mode 100644
index 000000000..62d831201
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
@@ -0,0 +1,329 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_retry
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ AttemptMetadataKey = "x-retry-attempty"
+)
+
+// UnaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short circuit for simplicity, and avoiding allocations.
+ if callOpts.max == 0 {
+ return invoker(parentCtx, method, req, reply, cc, grpcOpts...)
+ }
+ var lastErr error
+ for attempt := uint(0); attempt < callOpts.max; attempt++ {
+ if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil {
+ return err
+ }
+ callCtx := perCallContext(parentCtx, callOpts, attempt)
+ lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...)
+ // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ if lastErr == nil {
+ return nil
+ }
+ logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr)
+ if isContextError(lastErr) {
+ if parentCtx.Err() != nil {
+ logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err())
+ // its the parent context deadline or cancellation.
+ return lastErr
+ } else if callOpts.perCallTimeout != 0 {
+ // We have set a perCallTimeout in the retry middleware, which would result in a context error if
+ // the deadline was exceeded, in which case try again.
+ logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt)
+ continue
+ }
+ }
+ if !isRetriable(lastErr, callOpts) {
+ return lastErr
+ }
+ }
+ return lastErr
+ }
+}
+
+// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short circuit for simplicity, and avoiding allocations.
+ if callOpts.max == 0 {
+ return streamer(parentCtx, desc, cc, method, grpcOpts...)
+ }
+ if desc.ClientStreams {
+ return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()")
+ }
+
+ var lastErr error
+ for attempt := uint(0); attempt < callOpts.max; attempt++ {
+ if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil {
+ return nil, err
+ }
+ callCtx := perCallContext(parentCtx, callOpts, 0)
+
+ var newStreamer grpc.ClientStream
+ newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...)
+ if lastErr == nil {
+ retryingStreamer := &serverStreamingRetryingStream{
+ ClientStream: newStreamer,
+ callOpts: callOpts,
+ parentCtx: parentCtx,
+ streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+ return streamer(ctx, desc, cc, method, grpcOpts...)
+ },
+ }
+ return retryingStreamer, nil
+ }
+
+ logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr)
+ if isContextError(lastErr) {
+ if parentCtx.Err() != nil {
+ logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err())
+ // its the parent context deadline or cancellation.
+ return nil, lastErr
+ } else if callOpts.perCallTimeout != 0 {
+ // We have set a perCallTimeout in the retry middleware, which would result in a context error if
+ // the deadline was exceeded, in which case try again.
+ logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt)
+ continue
+ }
+ }
+ if !isRetriable(lastErr, callOpts) {
+ return nil, lastErr
+ }
+ }
+ return nil, lastErr
+ }
+}
+
+// serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+ grpc.ClientStream
+ bufferedSends []interface{} // single message that the client can sen
+ receivedGood bool // indicates whether any prior receives were successful
+ wasClosedSend bool // indicates that CloseSend was closed
+ parentCtx context.Context
+ callOpts *options
+ streamerCall func(ctx context.Context) (grpc.ClientStream, error)
+ mu sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+ s.mu.Lock()
+ s.ClientStream = clientStream
+ s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+ s.mu.Lock()
+ s.bufferedSends = append(s.bufferedSends, m)
+ s.mu.Unlock()
+ return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+ s.mu.Lock()
+ s.wasClosedSend = true
+ s.mu.Unlock()
+ return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+ return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+ return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+ attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+ if !attemptRetry {
+ return lastErr // success or hard failure
+ }
+ // We start off from attempt 1, because zeroth was already made on normal SendMsg().
+ for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+ if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil {
+ return err
+ }
+ callCtx := perCallContext(s.parentCtx, s.callOpts, attempt)
+ newStream, err := s.reestablishStreamAndResendBuffer(callCtx)
+ if err != nil {
+ // Retry dial and transport errors of establishing stream as grpc doesn't retry.
+ if isRetriable(err, s.callOpts) {
+ continue
+ }
+ return err
+ }
+
+ s.setStream(newStream)
+ attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+ //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
+ if !attemptRetry {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+ s.mu.RLock()
+ wasGood := s.receivedGood
+ s.mu.RUnlock()
+ err := s.getStream().RecvMsg(m)
+ if err == nil || err == io.EOF {
+ s.mu.Lock()
+ s.receivedGood = true
+ s.mu.Unlock()
+ return false, err
+ } else if wasGood {
+ // previous RecvMsg in the stream succeeded, no retry logic should interfere
+ return false, err
+ }
+ if isContextError(err) {
+ if s.parentCtx.Err() != nil {
+ logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err())
+ return false, err
+ } else if s.callOpts.perCallTimeout != 0 {
+ // We have set a perCallTimeout in the retry middleware, which would result in a context error if
+ // the deadline was exceeded, in which case try again.
+ logTrace(s.parentCtx, "grpc_retry context error from retry call")
+ return true, err
+ }
+ }
+ return isRetriable(err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(
+ callCtx context.Context,
+) (grpc.ClientStream, error) {
+ s.mu.RLock()
+ bufferedSends := s.bufferedSends
+ s.mu.RUnlock()
+ newStream, err := s.streamerCall(callCtx)
+ if err != nil {
+ logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err)
+ return nil, err
+ }
+ for _, msg := range bufferedSends {
+ if err := newStream.SendMsg(msg); err != nil {
+ logTrace(callCtx, "grpc_retry failed resending message: %v", err)
+ return nil, err
+ }
+ }
+ if err := newStream.CloseSend(); err != nil {
+ logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err)
+ return nil, err
+ }
+ return newStream, nil
+}
+
+func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error {
+ var waitTime time.Duration = 0
+ if attempt > 0 {
+ waitTime = callOpts.backoffFunc(parentCtx, attempt)
+ }
+ if waitTime > 0 {
+ logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime)
+ timer := time.NewTimer(waitTime)
+ select {
+ case <-parentCtx.Done():
+ timer.Stop()
+ return contextErrToGrpcErr(parentCtx.Err())
+ case <-timer.C:
+ }
+ }
+ return nil
+}
+
+func isRetriable(err error, callOpts *options) bool {
+ errCode := status.Code(err)
+ if isContextError(err) {
+ // context errors are not retriable based on user settings.
+ return false
+ }
+ for _, code := range callOpts.codes {
+ if code == errCode {
+ return true
+ }
+ }
+ return false
+}
+
+func isContextError(err error) bool {
+ code := status.Code(err)
+ return code == codes.DeadlineExceeded || code == codes.Canceled
+}
+
+func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context {
+ ctx := parentCtx
+ if callOpts.perCallTimeout != 0 {
+ ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout)
+ }
+ if attempt > 0 && callOpts.includeHeader {
+ mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt))
+ ctx = mdClone.ToOutgoing(ctx)
+ }
+ return ctx
+}
+
+func contextErrToGrpcErr(err error) error {
+ switch err {
+ case context.DeadlineExceeded:
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return status.Error(codes.Canceled, err.Error())
+ default:
+ return status.Error(codes.Unknown, err.Error())
+ }
+}
+
+func logTrace(ctx context.Context, format string, a ...interface{}) {
+ tr, ok := trace.FromContext(ctx)
+ if !ok {
+ return
+ }
+ tr.LazyPrintf(format, a...)
+}
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go
new file mode 100644
index 000000000..4e69a6305
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go
@@ -0,0 +1,28 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+Backoff Helper Utilities
+
+Implements common backoff features.
+*/
+package backoffutils
+
+import (
+ "math/rand"
+ "time"
+)
+
+// JitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example for 10s and jitter 0.1, it will return a time within [9s, 11s])
+func JitterUp(duration time.Duration, jitter float64) time.Duration {
+ multiplier := jitter * (rand.Float64()*2 - 1)
+ return time.Duration(float64(duration) * (1 + multiplier))
+}
+
+// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0.
+func ExponentBase2(a uint) uint {
+ return (1 << a) >> 1
+}
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
new file mode 100644
index 000000000..1ed9bb499
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside
+Context handlers.
+
+While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md)
+they are hard to use.
+
+The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example
+the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context
+metadata.
+
+ nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom")
+ clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx)
+*/
+
+package metautils
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
new file mode 100644
index 000000000..1c60585dd
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -0,0 +1,126 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package metautils
+
+import (
+ "context"
+ "strings"
+
+ "google.golang.org/grpc/metadata"
+)
+
+// NiceMD is a convenience wrapper defining extra functions on the metadata.
+type NiceMD metadata.MD
+
+// ExtractIncoming extracts an inbound metadata from the server-side context.
+//
+// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns
+// a new empty NiceMD.
+func ExtractIncoming(ctx context.Context) NiceMD {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return NiceMD(metadata.Pairs())
+ }
+ return NiceMD(md)
+}
+
+// ExtractOutgoing extracts an outbound metadata from the client-side context.
+//
+// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns
+// a new empty NiceMD.
+func ExtractOutgoing(ctx context.Context) NiceMD {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ return NiceMD(metadata.Pairs())
+ }
+ return NiceMD(md)
+}
+
+// Clone performs a *deep* copy of the metadata.MD.
+//
+// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted
+// all keys get copied.
+func (m NiceMD) Clone(copiedKeys ...string) NiceMD {
+ newMd := NiceMD(metadata.Pairs())
+ for k, vv := range m {
+ found := false
+ if len(copiedKeys) == 0 {
+ found = true
+ } else {
+ for _, allowedKey := range copiedKeys {
+ if strings.EqualFold(allowedKey, k) {
+ found = true
+ break
+ }
+ }
+ }
+ if !found {
+ continue
+ }
+ newMd[k] = make([]string, len(vv))
+ copy(newMd[k], vv)
+ }
+ return NiceMD(newMd)
+}
+
+// ToOutgoing sets the given NiceMD as a client-side context for dispatching.
+func (m NiceMD) ToOutgoing(ctx context.Context) context.Context {
+ return metadata.NewOutgoingContext(ctx, metadata.MD(m))
+}
+
+// ToIncoming sets the given NiceMD as a server-side context for dispatching.
+//
+// This is mostly useful in ServerInterceptors..
+func (m NiceMD) ToIncoming(ctx context.Context) context.Context {
+ return metadata.NewIncomingContext(ctx, metadata.MD(m))
+}
+
+// Get retrieves a single value from the metadata.
+//
+// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set,
+// an empty string is returned.
+//
+// The function is binary-key safe.
+func (m NiceMD) Get(key string) string {
+ k := strings.ToLower(key)
+ vv, ok := m[k]
+ if !ok {
+ return ""
+ }
+ return vv[0]
+}
+
+// Del removes all values for the given key from the metadata.
+//
+// It works analogously to http.Header.Del, deleting all values if they exist.
+//
+// The function is binary-key safe.
+
+func (m NiceMD) Del(key string) NiceMD {
+ k := strings.ToLower(key)
+ delete(m, k)
+ return m
+}
+
+// Set sets the given value in a metadata.
+//
+// It works analogously to http.Header.Set, overwriting all previous metadata values.
+//
+// The function is binary-key safe.
+func (m NiceMD) Set(key string, value string) NiceMD {
+ k := strings.ToLower(key)
+ m[k] = []string{value}
+ return m
+}
+
+// Add appends a value to the metadata for the given key.
+//
+// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
+//
+// The function is binary-key safe.
+func (m NiceMD) Add(key string, value string) NiceMD {
+ k := strings.ToLower(key)
+ m[k] = append(m[k], value)
+ return m
+}
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/doc.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/doc.go
new file mode 100644
index 000000000..7205520a2
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+`grpc_validator` is a generic request contents validator server-side middleware for gRPC.
+
+Request Validator Middleware
+
+Validating input is important, and hard. It also causes a lot of boilerplate code. This middleware
+checks for the existence of a `Validate` method on each of the messages of a gRPC request. This
+includes the single request of the `Unary` calls, as well as each message of the inbound Stream calls.
+In case of a validation failure, an `InvalidArgument` gRPC status is returned, along with a
+description of the validation failure.
+
+While it is generic, it was intended to be used with https://github.com/mwitkow/go-proto-validators,
+a Go protocol buffers codegen plugin that creates the `Validate` methods (including nested messages)
+based on declarative options in the `.proto` files themselves. For example:
+
+
+ syntax = "proto3";
+ package validator.examples;
+ import "github.com/mwitkow/go-proto-validators/validator.proto";
+
+ message InnerMessage {
+ // some_integer can only be in range (1, 100).
+ int32 some_integer = 1 [(validator.field) = {int_gt: 0, int_lt: 100}];
+ // some_float can only be in range (0;1).
+ double some_float = 2 [(validator.field) = {float_gte: 0, float_lte: 1}];
+ }
+
+ message OuterMessage {
+ // important_string must be a lowercase alpha-numeric of 5 to 30 characters (RE2 syntax).
+ string important_string = 1 [(validator.field) = {regex: "^[a-z]{2,5}$"}];
+ // proto3 doesn't have `required`, the `msg_exist` enforces presence of InnerMessage.
+ InnerMessage inner = 2 [(validator.field) = {msg_exists : true}];
+ }
+
+The `OuterMessage.Validate` would include validation of regexes, existence of the InnerMessage and
+the range values within it. The `grpc_validator` middleware would then automatically use that to
+check all messages processed by the server.
+
+Please consult https://github.com/mwitkow/go-proto-validators for details on `protoc` invocation and
+other parameters of customization.
+*/
+package grpc_validator
diff --git a/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/validator.go b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/validator.go
new file mode 100644
index 000000000..7e1e413d1
--- /dev/null
+++ b/test/integration/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/validator.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_validator
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// The validate interface starting with protoc-gen-validate v0.6.0.
+// See https://github.com/envoyproxy/protoc-gen-validate/pull/455.
+type validator interface {
+ Validate(all bool) error
+}
+
+// The validate interface prior to protoc-gen-validate v0.6.0.
+type validatorLegacy interface {
+ Validate() error
+}
+
+func validate(req interface{}) error {
+ switch v := req.(type) {
+ case validatorLegacy:
+ if err := v.Validate(); err != nil {
+ return status.Error(codes.InvalidArgument, err.Error())
+ }
+ case validator:
+ if err := v.Validate(false); err != nil {
+ return status.Error(codes.InvalidArgument, err.Error())
+ }
+ }
+ return nil
+}
+
+// UnaryServerInterceptor returns a new unary server interceptor that validates incoming messages.
+//
+// Invalid messages will be rejected with `InvalidArgument` before reaching any userspace handlers.
+func UnaryServerInterceptor() grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ if err := validate(req); err != nil {
+ return nil, err
+ }
+ return handler(ctx, req)
+ }
+}
+
+// UnaryClientInterceptor returns a new unary client interceptor that validates outgoing messages.
+//
+// Invalid messages will be rejected with `InvalidArgument` before sending the request to server.
+func UnaryClientInterceptor() grpc.UnaryClientInterceptor {
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ if err := validate(req); err != nil {
+ return err
+ }
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+}
+
+// StreamServerInterceptor returns a new streaming server interceptor that validates incoming messages.
+//
+// The stage at which invalid messages will be rejected with `InvalidArgument` varies based on the
+// type of the RPC. For `ServerStream` (1:m) requests, it will happen before reaching any userspace
+// handlers. For `ClientStream` (n:1) or `BidiStream` (n:m) RPCs, the messages will be rejected on
+// calls to `stream.Recv()`.
+func StreamServerInterceptor() grpc.StreamServerInterceptor {
+ return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ wrapper := &recvWrapper{stream}
+ return handler(srv, wrapper)
+ }
+}
+
+type recvWrapper struct {
+ grpc.ServerStream
+}
+
+func (s *recvWrapper) RecvMsg(m interface{}) error {
+ if err := s.ServerStream.RecvMsg(m); err != nil {
+ return err
+ }
+
+ if err := validate(m); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/.gitignore b/test/integration/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 000000000..15586a2b5
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/.travis.yml b/test/integration/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 000000000..cb63a3216
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,13 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.x
+ - tip
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/LICENSE b/test/integration/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/Makefile b/test/integration/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 000000000..84fd743f5
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go get -t ./...
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/README.md b/test/integration/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 000000000..c8223326d
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[](https://godoc.org/github.com/hashicorp/hcl) [](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps makes systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and others similar.
+
+## Why?
+
+A common question when viewing HCL is to ask the question: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also forces
+people to learn some set of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<-
+ echo %Path%
+
+ go version
+
+ go env
+
+ go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/decoder.go b/test/integration/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..bed9ebbe1
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we have the file, we really decode the root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float32, reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we make our own thing
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // Very useful in situations where we want to preserve ast.Node information
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+ // Set the result to what its supposed to be, then reset
+ // result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+ // Create the slice if it isn't nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly deocode.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, structVal.Field(i)})
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, field.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 000000000..575a20b50
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 000000000..6e5ef654b
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+ // always returns the uninitiliazed position
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents a HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+ // keys is only one length long if it's of type assignment. If it's a
+ // nested object it can be larger than one. In that case "assign" is
+ // invalid as there is no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+ // val is the item itself. It can be an object,list, number, bool or a
+ // string. If key length is larger than one, val can be only of type
+ // Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// ObjectKeys are either an identifier or of type string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListStatement represents a HCL List type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents a HCL Object Type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /*- style commment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 000000000..5c99381df
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 000000000..64c83bcfb
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+ // since the scanner and output only work with "\n" line endings, we may
+ // end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's multiline comment, ie starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
+ }
+
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. this is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error. i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not a RBRACE, it's an syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and Save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 000000000..7c038d12a
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+ blank = byte(' ')
+ newline = byte('\n')
+ tab = byte('\t')
+ infinity = 1 << 30 // offset or line
+)
+
+var (
+ unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+ cfg Config
+ prev token.Pos
+
+ comments []*ast.CommentGroup // may be nil, contains all comments
+ standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+ enableTrace bool
+ indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int { return len(b) }
+func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not attached to any node as a lead or line comment
+func (p *printer) collectComments(node ast.Node) {
+ // first collect all comments. This is already stored in
+ // ast.File.(comments)
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.File:
+ p.comments = t.Comments
+ return nn, false
+ }
+ return nn, true
+ })
+
+ standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+ for _, c := range p.comments {
+ standaloneComments[c.Pos()] = c
+ }
+
+ // next remove all lead and line comments from the overall comment map.
+ // This will give us comments which are standalone, comments which are not
+ // assigned to any kind of node.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ case *ast.ObjectItem:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ }
+
+ return nn, true
+ })
+
+ for _, c := range standaloneComments {
+ p.standaloneComments = append(p.standaloneComments, c)
+ }
+
+ sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates the printable HCL output for the given node and returns it.
+func (p *printer) output(n interface{}) []byte {
+ var buf bytes.Buffer
+
+ switch t := n.(type) {
+ case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
+ return p.output(t.Node)
+ case *ast.ObjectList:
+ defer un(trace(p, "ObjectList"))
+
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that we should be for this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // if we hit the end add newlines so we can print the comment
+ // we don't do this if prev is invalid which means the
+ // beginning of the file since the first comment should
+ // be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+ buf.Write([]byte{newline, newline})
+ newlinePrinted = true
+ }
+
+ // Write the actual comment.
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+
+ // Set printed to true to note that we printed something
+ printed = true
+ }
+ }
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
+ }
+
+ if index == len(t.Items) {
+ break
+ }
+
+ buf.Write(p.output(t.Items[index]))
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+ // 1. The next object is more than one line away anyways,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
+ }
+ index++
+ }
+ case *ast.ObjectKey:
+ buf.WriteString(t.Token.Text)
+ case *ast.ObjectItem:
+ p.prev = t.Pos()
+ buf.Write(p.objectItem(t))
+ case *ast.LiteralType:
+ buf.Write(p.literalType(t))
+ case *ast.ListType:
+ buf.Write(p.list(t))
+ case *ast.ObjectType:
+ buf.Write(p.objectType(t))
+ default:
+ fmt.Printf(" unknown type: %T\n", n)
+ }
+
+ return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+ result := []byte(lit.Token.Text)
+ switch lit.Token.Type {
+ case token.HEREDOC:
+ // Clear the trailing newline from heredocs
+ if result[len(result)-1] == '\n' {
+ result = result[:len(result)-1]
+ }
+
+ // Poison lines 2+ so that we don't indent them
+ result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
+ }
+
+ return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object type
+// starts with one/multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+ defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+ var buf bytes.Buffer
+
+ if o.LeadComment != nil {
+ for _, comment := range o.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ // If key and val are on different lines, treat line comments like lead comments.
+ if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range o.Keys {
+ buf.WriteString(k.Token.Text)
+ buf.WriteByte(blank)
+
+ // reach end of key
+ if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ buf.Write(p.output(o.Val))
+
+ if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+ buf.WriteByte(blank)
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+ defer un(trace(p, "ObjectType"))
+ var buf bytes.Buffer
+ buf.WriteString("{")
+
+ var index int
+ var nextItem token.Pos
+ var commented, newlinePrinted bool
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that we should be for this object item.
+ for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If there are standalone comments and the initial newline has not
+ // been printed yet, do it now.
+ if !newlinePrinted {
+ newlinePrinted = true
+ buf.WriteByte(newline)
+ }
+
+ // add newline if it's between other printed nodes
+ if index > 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+ buf.WriteByte(newline)
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+ // check if we have adjacent one liner items. If yes we'll going to align
+ // the comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+ // one means a oneliner with out any lead comment
+ // two means a oneliner with lead comment
+ // anything else might be something else
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check it too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ if p.isSingleLineList(l) {
+ return p.singleLineList(l)
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ buf.WriteByte(newline)
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ haveEmptyLine := false
+ for i, item := range l.List {
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // Ensure an empty line before every element with a
+ // lead comment (except the first item in a list).
+ if !haveEmptyLine && i != 0 {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+
+ // if this item is a heredoc, then we output the comma on
+ // the next line. This is the only case this happens.
+ comma := []byte{','}
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+ // lead comment (except the first item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * they were previously formatted entirely on one line
+// * they consist entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent indents the lines of the given buffer for each non-empty line
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks all the 2nd and further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+// * has no lead comments (hence multi-line)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+ // If it isn't an object type, then its not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 000000000..6617ab8e7
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+ return buf.Bytes(), nil
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 000000000..624a18fe3
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from a io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) (eof)
+// if an error occurs (including io.EOF).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == utf8.RuneError && size == 1 {
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ if ch == '\x00' {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position, initial next() is moving the offset by one(size of rune
+ // actually), though we are interested with the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421(octal) or 0.1231(float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+		// hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example an
+// octal notation \184 would yield in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start && ch != eof {
+ // we scanned all digits, put the last non digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err prints the error of any scanning to s.Error function. If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is an hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal,decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 000000000..5f981eaa2
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 000000000..e37c0664e
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // < 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/test/integration/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 000000000..125a5f072
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of object value, such as number, bool, string,
+// object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not a RBRACE, it's an syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/test/integration/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 000000000..fe3f0f095
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from a io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position, initial next() is moving the offset by one(size of rune
+ // actually), though we are interested with the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If we have a larger number and this is zero, error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexademical notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example an
+// octal notation \184 would yield in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err prints the error of any scanning to s.Error function. If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal,decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/json/token/position.go b/test/integration/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/json/token/token.go b/test/integration/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 000000000..95a0c3eee
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns the token's literal text. Note that this is only
+// applicable for certain token types, such as token.IDENT,
+// token.STRING, etc..
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/lex.go b/test/integration/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 000000000..d9993c292
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
diff --git a/test/integration/vendor/github.com/hashicorp/hcl/parse.go b/test/integration/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 000000000..1fca53c4c
--- /dev/null
+++ b/test/integration/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts as input byte slice and returns ast tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+// ParseString accepts input as a string and returns ast tree.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/.gitignore b/test/integration/vendor/github.com/klauspost/cpuid/v2/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/test/integration/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
new file mode 100644
index 000000000..944cc0007
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
@@ -0,0 +1,74 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+
+builds:
+ -
+ id: "cpuid"
+ binary: cpuid
+ main: ./cmd/cpuid/main.go
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -ldflags=-s -w
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm64
+ goarm:
+ - 7
+
+archives:
+ -
+ id: cpuid
+ name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
+ replacements:
+ aix: AIX
+ darwin: OSX
+ linux: Linux
+ windows: Windows
+ 386: i386
+ amd64: x86_64
+ freebsd: FreeBSD
+ netbsd: NetBSD
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - LICENSE
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ name_template: "{{ .Tag }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^doc:'
+ - '^docs:'
+ - '^test:'
+ - '^tests:'
+ - '^Update\sREADME.md'
+
+nfpms:
+ -
+ file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+ vendor: Klaus Post
+ homepage: https://github.com/klauspost/cpuid
+ maintainer: Klaus Post
+ description: CPUID Tool
+ license: BSD 3-Clause
+ formats:
+ - deb
+ - rpm
+ replacements:
+ darwin: Darwin
+ linux: Linux
+ freebsd: FreeBSD
+ amd64: x86_64
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/test/integration/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
new file mode 100644
index 000000000..2ef4714f7
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt
@@ -0,0 +1,35 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2015- Klaus Post & Contributors.
+Email: klauspost@gmail.com
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/LICENSE b/test/integration/vendor/github.com/klauspost/cpuid/v2/LICENSE
new file mode 100644
index 000000000..5cec7ee94
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/README.md b/test/integration/vendor/github.com/klauspost/cpuid/v2/README.md
new file mode 100644
index 000000000..ea7df3dd8
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -0,0 +1,258 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
+
+You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/cpuid/v2.svg)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
+[![Build Status][3]][4]
+
+[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
+[4]: https://travis-ci.org/klauspost/cpuid
+
+## installing
+
+`go get -u github.com/klauspost/cpuid/v2` using modules.
+
+Drop `v2` for others.
+
+## example
+
+```Go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ . "github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+ // Print basic CPU information:
+ fmt.Println("Name:", CPU.BrandName)
+ fmt.Println("PhysicalCores:", CPU.PhysicalCores)
+ fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
+ fmt.Println("LogicalCores:", CPU.LogicalCores)
+ fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
+ fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
+ fmt.Println("Cacheline bytes:", CPU.CacheLine)
+ fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
+ fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
+ fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
+ fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
+ fmt.Println("Frequency", CPU.Hz, "hz")
+
+ // Test if we have these specific features:
+ if CPU.Supports(SSE, SSE2) {
+ fmt.Println("We have Streaming SIMD 2 Extensions")
+ }
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: AMD Ryzen 9 3950X 16-Core Processor
+PhysicalCores: 16
+ThreadsPerCore: 2
+LogicalCores: 32
+Family 23 Model: 113 Vendor ID: AMD
+Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3
+Cacheline bytes: 64
+L1 Data Cache: 32768 bytes
+L1 Instruction Cache: 32768 bytes
+L2 Cache: 524288 bytes
+L3 Cache: 16777216 bytes
+Frequency 0 hz
+We have Streaming SIMD 2 Extensions
+```
+
+# usage
+
+The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
+A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
+
+Note that for some cpu/os combinations some features will not be detected.
+`amd64` has rather good support and should work reliably on all platforms.
+
+Note that hypervisors may not pass through all CPU features.
+
+## arm64 feature detection
+
+Not all operating systems provide ARM features directly
+and there is no safe way to do so for the rest.
+
+Currently `arm64/linux` and `arm64/freebsd` should be quite reliable.
+`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected.
+
+A `DetectARM()` can be used if you are able to control your deployment,
+it will detect CPU features, but may crash if the OS doesn't intercept the calls.
+A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below.
+
+Note that currently only features are detected on ARM,
+no additional information is currently available.
+
+## flags
+
+It is possible to add flags that affect cpu detection.
+
+For this the `Flags()` command is provided.
+
+This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called.
+
+This means that any detection used in `init()` functions will not contain these flags.
+
+Example:
+
+```Go
+package main
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+ cpuid.Flags()
+ flag.Parse()
+ cpuid.Detect()
+
+ // Test if we have these specific features:
+ if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
+ fmt.Println("We have Streaming SIMD 2 Extensions")
+ }
+}
+```
+
+## commandline
+
+Download as binary from: https://github.com/klauspost/cpuid/releases
+
+Install from source:
+
+`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
+
+### Example
+
+```
+λ cpuid
+Name: AMD Ryzen 9 3950X 16-Core Processor
+Vendor String: AuthenticAMD
+Vendor ID: AMD
+PhysicalCores: 16
+Threads Per Core: 2
+Logical Cores: 32
+CPU Family 23 Model: 113
+Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE
+Microarchitecture level: 3
+Cacheline bytes: 64
+L1 Instruction Cache: 32768 bytes
+L1 Data Cache: 32768 bytes
+L2 Cache: 524288 bytes
+L3 Cache: 16777216 bytes
+
+```
+### JSON Output:
+
+```
+λ cpuid --json
+{
+ "BrandName": "AMD Ryzen 9 3950X 16-Core Processor",
+ "VendorID": 2,
+ "VendorString": "AuthenticAMD",
+ "PhysicalCores": 16,
+ "ThreadsPerCore": 2,
+ "LogicalCores": 32,
+ "Family": 23,
+ "Model": 113,
+ "CacheLine": 64,
+ "Hz": 0,
+ "BoostFreq": 0,
+ "Cache": {
+ "L1I": 32768,
+ "L1D": 32768,
+ "L2": 524288,
+ "L3": 16777216
+ },
+ "SGX": {
+ "Available": false,
+ "LaunchControl": false,
+ "SGX1Supported": false,
+ "SGX2Supported": false,
+ "MaxEnclaveSizeNot64": 0,
+ "MaxEnclaveSize64": 0,
+ "EPCSections": null
+ },
+ "Features": [
+ "ADX",
+ "AESNI",
+ "AVX",
+ "AVX2",
+ "BMI1",
+ "BMI2",
+ "CLMUL",
+ "CLZERO",
+ "CMOV",
+ "CMPXCHG8",
+ "CPBOOST",
+ "CX16",
+ "F16C",
+ "FMA3",
+ "FXSR",
+ "FXSROPT",
+ "HTT",
+ "HYPERVISOR",
+ "LAHF",
+ "LZCNT",
+ "MCAOVERFLOW",
+ "MMX",
+ "MMXEXT",
+ "MOVBE",
+ "NX",
+ "OSXSAVE",
+ "POPCNT",
+ "RDRAND",
+ "RDSEED",
+ "RDTSCP",
+ "SCE",
+ "SHA",
+ "SSE",
+ "SSE2",
+ "SSE3",
+ "SSE4",
+ "SSE42",
+ "SSE4A",
+ "SSSE3",
+ "SUCCOR",
+ "X87",
+ "XSAVE"
+ ],
+ "X64Level": 3
+}
+```
+
+### Check CPU microarch level
+
+```
+λ cpuid --check-level=3
+2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor
+2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3.
+Exit Code 0
+
+λ cpuid --check-level=4
+2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor
+2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3.
+Exit Code 1
+```
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid.go
new file mode 100644
index 000000000..701f2385b
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -0,0 +1,1262 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) as well as arm64 is supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import (
+ "flag"
+ "fmt"
+ "math"
+ "math/bits"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf
+// and Processor Programming Reference (PPR)
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+ VendorUnknown Vendor = iota
+ Intel
+ AMD
+ VIA
+ Transmeta
+ NSC
+ KVM // Kernel-based Virtual Machine
+ MSVM // Microsoft Hyper-V or Windows Virtual PC
+ VMware
+ XenHVM
+ Bhyve
+ Hygon
+ SiS
+ RDC
+
+ Ampere
+ ARM
+ Broadcom
+ Cavium
+ DEC
+ Fujitsu
+ Infineon
+ Motorola
+ NVIDIA
+ AMCC
+ Qualcomm
+ Marvell
+
+ lastVendor
+)
+
+//go:generate stringer -type=FeatureID,Vendor
+
+// FeatureID is the ID of a specific cpu feature.
+type FeatureID int
+
+const (
+ // Keep index -1 as unknown
+ UNKNOWN = -1
+
+ // Add features
+ ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+ AESNI // Advanced Encryption Standard New Instructions
+ AMD3DNOW // AMD 3DNOW
+ AMD3DNOWEXT // AMD 3DNowExt
+ AMXBF16 // Tile computational operations on BFLOAT16 numbers
+ AMXINT8 // Tile computational operations on 8-bit integers
+ AMXTILE // Tile architecture
+ AVX // AVX functions
+ AVX2 // AVX2 functions
+ AVX512BF16 // AVX-512 BFLOAT16 Instructions
+ AVX512BITALG // AVX-512 Bit Algorithms
+ AVX512BW // AVX-512 Byte and Word Instructions
+ AVX512CD // AVX-512 Conflict Detection Instructions
+ AVX512DQ // AVX-512 Doubleword and Quadword Instructions
+ AVX512ER // AVX-512 Exponential and Reciprocal Instructions
+ AVX512F // AVX-512 Foundation
+ AVX512FP16 // AVX-512 FP16 Instructions
+ AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
+ AVX512PF // AVX-512 Prefetch Instructions
+ AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
+ AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
+ AVX512VL // AVX-512 Vector Length Extensions
+ AVX512VNNI // AVX-512 Vector Neural Network Instructions
+ AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
+ AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+ AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
+ AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
+ BMI1 // Bit Manipulation Instruction Set 1
+ BMI2 // Bit Manipulation Instruction Set 2
+ CETIBT // Intel CET Indirect Branch Tracking
+ CETSS // Intel CET Shadow Stack
+ CLDEMOTE // Cache Line Demote
+ CLMUL // Carry-less Multiplication
+ CLZERO // CLZERO instruction supported
+ CMOV // i686 CMOV
+ CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
+ CMPXCHG8 // CMPXCHG8 instruction
+ CPBOOST // Core Performance Boost
+ CX16 // CMPXCHG16B Instruction
+ ENQCMD // Enqueue Command
+ ERMS // Enhanced REP MOVSB/STOSB
+ F16C // Half-precision floating-point conversion
+ FMA3 // Intel FMA 3. Does not imply AVX.
+ FMA4 // Bulldozer FMA4 functions
+ FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
+ FXSROPT // FXSAVE/FXRSTOR optimizations
+ GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
+ HLE // Hardware Lock Elision
+ HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
+ HTT // Hyperthreading (enabled)
+ HWA // Hardware assert supported. Indicates support for MSRC001_10
+ HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
+ IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+ IBS // Instruction Based Sampling (AMD)
+ IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
+ IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
+ IBSFFV // Instruction Based Sampling Feature (AMD)
+ IBSOPCNT // Instruction Based Sampling Feature (AMD)
+ IBSOPCNTEXT // Instruction Based Sampling Feature (AMD)
+ IBSOPSAM // Instruction Based Sampling Feature (AMD)
+ IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
+ IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+ IBS_PREVENTHOST // Disallowing IBS use by the host supported
+ INT_WBINVD // WBINVD/WBNOINVD are interruptible.
+ INVLPGB // NVLPGB and TLBSYNC instruction supported
+ LAHF // LAHF/SAHF in long mode
+ LAM // If set, CPU supports Linear Address Masking
+ LBRVIRT // LBR virtualization
+ LZCNT // LZCNT instruction
+ MCAOVERFLOW // MCA overflow recovery support.
+ MCOMMIT // MCOMMIT instruction supported
+ MMX // standard MMX
+ MMXEXT // SSE integer functions or AMD MMX ext
+ MOVBE // MOVBE instruction (big-endian)
+ MOVDIR64B // Move 64 Bytes as Direct Store
+ MOVDIRI // Move Doubleword as Direct Store
+ MOVSB_ZL // Fast Zero-Length MOVSB
+ MPX // Intel MPX (Memory Protection Extensions)
+ MSRIRC // Instruction Retired Counter MSR available
+ MSR_PAGEFLUSH // Page Flush MSR available
+ NRIPS // Indicates support for NRIP save on VMEXIT
+ NX // NX (No-Execute) bit
+ OSXSAVE // XSAVE enabled by OS
+ PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
+ POPCNT // POPCNT instruction
+ RDPRU // RDPRU instruction supported
+ RDRAND // RDRAND instruction is available
+ RDSEED // RDSEED instruction is available
+ RDTSCP // RDTSCP Instruction
+ RTM // Restricted Transactional Memory
+ RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
+ SCE // SYSENTER and SYSEXIT instructions
+ SERIALIZE // Serialize Instruction Execution
+ SEV // AMD Secure Encrypted Virtualization supported
+ SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
+ SEV_ALTERNATIVE // AMD SEV Alternate Injection supported
+ SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests
+ SEV_ES // AMD SEV Encrypted State supported
+ SEV_RESTRICTED // AMD SEV Restricted Injection supported
+ SEV_SNP // AMD SEV Secure Nested Paging supported
+ SGX // Software Guard Extensions
+ SGXLC // Software Guard Extensions Launch Control
+ SHA // Intel SHA Extensions
+ SME // AMD Secure Memory Encryption supported
+ SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
+ SSE // SSE functions
+ SSE2 // P4 SSE functions
+ SSE3 // Prescott SSE3 functions
+ SSE4 // Penryn SSE4.1 functions
+ SSE42 // Nehalem SSE4.2 functions
+ SSE4A // AMD Barcelona microarchitecture SSE4a instructions
+ SSSE3 // Conroe SSSE3 functions
+ STIBP // Single Thread Indirect Branch Predictors
+ STOSB_SHORT // Fast short STOSB
+ SUCCOR // Software uncorrectable error containment and recovery capability.
+ SVM // AMD Secure Virtual Machine
+ SVMDA // Indicates support for the SVM decode assists.
+ SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control
+ SVML // AMD SVM lock. Indicates support for SVM-Lock.
+ SVMNP // AMD SVM nested paging
+ SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
+ SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
+ TBM // AMD Trailing Bit Manipulation
+ TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
+ TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
+ TSXLDTRK // Intel TSX Suspend Load Address Tracking
+ VAES // Vector AES. AVX(512) versions requires additional checks.
+ VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits.
+ VMPL // AMD VM Permission Levels supported
+ VMSA_REGPROT // AMD VMSA Register Protection supported
+ VMX // Virtual Machine Extensions
+ VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions.
+ VTE // AMD Virtual Transparent Encryption supported
+ WAITPKG // TPAUSE, UMONITOR, UMWAIT
+ WBNOINVD // Write Back and Do Not Invalidate Cache
+ X87 // FPU
+ XGETBV1 // Supports XGETBV with ECX = 1
+ XOP // Bulldozer XOP functions
+ XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
+ XSAVEC // Supports XSAVEC and the compacted form of XRSTOR.
+ XSAVEOPT // XSAVEOPT available
+ XSAVES // Supports XSAVES/XRSTORS and IA32_XSS
+
+ // ARM features:
+ AESARM // AES instructions
+ ARMCPUID // Some CPU ID registers readable at user-level
+ ASIMD // Advanced SIMD
+ ASIMDDP // SIMD Dot Product
+ ASIMDHP // Advanced SIMD half-precision floating point
+ ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
+ ATOMICS // Large System Extensions (LSE)
+ CRC32 // CRC32/CRC32C instructions
+ DCPOP // Data cache clean to Point of Persistence (DC CVAP)
+ EVTSTRM // Generic timer
+ FCMA // Floatin point complex number addition and multiplication
+ FP // Single-precision and double-precision floating point
+ FPHP // Half-precision floating point
+ GPA // Generic Pointer Authentication
+ JSCVT // Javascript-style double->int convert (FJCVTZS)
+ LRCPC // Weaker release consistency (LDAPR, etc)
+ PMULL // Polynomial Multiply instructions (PMULL/PMULL2)
+ SHA1 // SHA-1 instructions (SHA1C, etc)
+ SHA2 // SHA-2 instructions (SHA256H, etc)
+ SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
+ SHA512 // SHA512 instructions
+ SM3 // SM3 instructions
+ SM4 // SM4 instructions
+ SVE // Scalable Vector Extension
+ // Keep it last. It automatically defines the size of []flagSet
+ lastID
+
+ firstID FeatureID = UNKNOWN + 1
+)
+
+// CPUInfo contains information about the detected system CPU.
+type CPUInfo struct {
+ BrandName string // Brand name reported by the CPU
+ VendorID Vendor // Comparable CPU vendor ID
+ VendorString string // Raw vendor string.
+ featureSet flagSet // Features of the CPU
+ PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
+ ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
+ LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
+ Family int // CPU family number
+ Model int // CPU model number
+ CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
+ Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
+ BoostFreq int64 // Max clock speed, if known, 0 otherwise
+ Cache struct {
+ L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
+ L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
+ L2 int // L2 Cache (per core or shared). Will be -1 if undetected
+ L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
+ }
+ SGX SGXSupport
+ maxFunc uint32
+ maxExFunc uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+var darwinHasAVX512 = func() bool { return false }
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to your data.
+var CPU CPUInfo
+
+func init() {
+ initCPU()
+ Detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while you are running your program
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func Detect() {
+ // Set defaults
+ CPU.ThreadsPerCore = 1
+ CPU.Cache.L1I = -1
+ CPU.Cache.L1D = -1
+ CPU.Cache.L2 = -1
+ CPU.Cache.L3 = -1
+ safe := true
+ if detectArmFlag != nil {
+ safe = !*detectArmFlag
+ }
+ addInfo(&CPU, safe)
+ if displayFeats != nil && *displayFeats {
+ fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
+ // Exit with non-zero so tests will print value.
+ os.Exit(1)
+ }
+ if disableFlag != nil {
+ s := strings.Split(*disableFlag, ",")
+ for _, feat := range s {
+ feat := ParseFeature(strings.TrimSpace(feat))
+ if feat != UNKNOWN {
+ CPU.featureSet.unset(feat)
+ }
+ }
+ }
+}
+
+// DetectARM will detect ARM64 features.
+// This is NOT done automatically since it can potentially crash
+// if the OS does not handle the command.
+// If in the future this can be done safely this function may not
+// do anything.
+func DetectARM() {
+ addInfo(&CPU, false)
+}
+
+var detectArmFlag *bool
+var displayFeats *bool
+var disableFlag *string
+
+// Flags will enable flags.
+// This must be called *before* flag.Parse AND
+// Detect must be called after the flags have been parsed.
+// Note that this means that any detection used in init() functions
+// will not contain these flags.
+func Flags() {
+ disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
+ displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
+ detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
+}
+
+// Supports returns whether the CPU supports all of the requested features.
+func (c CPUInfo) Supports(ids ...FeatureID) bool {
+ for _, id := range ids {
+ if !c.featureSet.inSet(id) {
+ return false
+ }
+ }
+ return true
+}
+
+// Has allows for checking a single feature.
+// Should be inlined by the compiler.
+func (c CPUInfo) Has(id FeatureID) bool {
+ return c.featureSet.inSet(id)
+}
+
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
+var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+
+// X64Level returns the microarchitecture level detected on the CPU.
+// If features are lacking or non x64 mode, 0 is returned.
+// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+func (c CPUInfo) X64Level() int {
+ if c.featureSet.hasSet(level4Features) {
+ return 4
+ }
+ if c.featureSet.hasSet(level3Features) {
+ return 3
+ }
+ if c.featureSet.hasSet(level2Features) {
+ return 2
+ }
+ if c.featureSet.hasSet(level1Features) {
+ return 1
+ }
+ return 0
+}
+
+// Disable will disable one or several features.
+func (c *CPUInfo) Disable(ids ...FeatureID) bool {
+ for _, id := range ids {
+ c.featureSet.unset(id)
+ }
+ return true
+}
+
+// Enable will enable one or several features even if they were undetected.
+// This is of course not recommended for obvious reasons.
+func (c *CPUInfo) Enable(ids ...FeatureID) bool {
+ for _, id := range ids {
+ c.featureSet.set(id)
+ }
+ return true
+}
+
+// IsVendor returns true if the CPU vendor ID matches v.
+func (c CPUInfo) IsVendor(v Vendor) bool {
+ return c.VendorID == v
+}
+
+// FeatureSet returns all available features as strings.
+func (c CPUInfo) FeatureSet() []string {
+ s := make([]string, 0, c.featureSet.nEnabled())
+ s = append(s, c.featureSet.Strings()...)
+ return s
+}
+
+// RTCounter returns the 64-bit time-stamp counter
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+ if !c.Supports(RDTSCP) {
+ return 0
+ }
+ a, _, _, d := rdtscpAsm()
+ return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+ if !c.Supports(RDTSCP) {
+ return 0
+ }
+ _, _, ecx, _ := rdtscpAsm()
+ return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+ if c.maxFunc < 1 {
+ return -1
+ }
+ _, ebx, _, _ := cpuid(1)
+ return int(ebx >> 24)
+}
+
+// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
+// supported, use it, otherwise parse the brand string. Yes, really.
+func (c *CPUInfo) frequencies() {
+	c.Hz, c.BoostFreq = 0, 0
+	mfi := maxFunctionID()
+	if mfi >= 0x15 {
+		// Leaf 0x15: TSC/core crystal clock ratio (EBX/EAX) and crystal
+		// frequency in Hz (ECX); all three must be non-zero to be usable.
+		eax, ebx, ecx, _ := cpuid(0x15)
+		if eax != 0 && ebx != 0 && ecx != 0 {
+			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
+		}
+	}
+	if mfi >= 0x16 {
+		// Leaf 0x16: base (EAX) and max/boost (EBX) frequency in MHz.
+		a, b, _, _ := cpuid(0x16)
+		// Base...
+		if a&0xffff > 0 {
+			c.Hz = int64(a&0xffff) * 1_000_000
+		}
+		// Boost...
+		if b&0xffff > 0 {
+			c.BoostFreq = int64(b&0xffff) * 1_000_000
+		}
+	}
+	if c.Hz > 0 {
+		return
+	}
+
+	// computeHz determines the official rated speed of a CPU from its brand
+	// string. This insanity is *actually the official documented way to do
+	// this according to Intel*, prior to leaf 0x15 existing. The official
+	// documentation only shows this working for exactly `x.xx` or `xxxx`
+	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
+	// sizes.
+	model := c.BrandName
+	hz := strings.LastIndex(model, "Hz")
+	if hz < 3 {
+		return
+	}
+	var multiplier int64
+	switch model[hz-1] {
+	case 'M':
+		multiplier = 1000 * 1000
+	case 'G':
+		multiplier = 1000 * 1000 * 1000
+	case 'T':
+		multiplier = 1000 * 1000 * 1000 * 1000
+	}
+	if multiplier == 0 {
+		return
+	}
+	freq := int64(0)
+	divisor := int64(0)
+	decimalShift := int64(1)
+	var i int
+	// Walk backwards from the unit, accumulating digits; a single '.' records
+	// where the decimal point was (as a power-of-ten divisor).
+	for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
+		if model[i] >= '0' && model[i] <= '9' {
+			freq += int64(model[i]-'0') * decimalShift
+			decimalShift *= 10
+		} else if model[i] == '.' {
+			if divisor != 0 {
+				return
+			}
+			divisor = decimalShift
+		} else {
+			return
+		}
+	}
+	// we didn't find a space
+	if i < 0 {
+		return
+	}
+	if divisor != 0 {
+		c.Hz = (freq * multiplier) / divisor
+		return
+	}
+	c.Hz = freq * multiplier
+}
+
+// VM Will return true if the cpu id indicates we are in
+// a virtual machine.
+func (c CPUInfo) VM() bool {
+	// NOTE(review): reads the package-level CPU value rather than the
+	// receiver c — presumably intentional upstream; verify before changing.
+	return CPU.featureSet.inSet(HYPERVISOR)
+}
+
+// flags contains detected cpu features and characteristics
+type flags uint64
+
+// log2(bits_in_uint64)
+const flagBitsLog2 = 6
+const flagBits = 1 << flagBitsLog2
+const flagMask = flagBits - 1
+
+// flagSet contains detected cpu features and characteristics in an array of flags
+// FeatureID f maps to bit (f & flagMask) of word (f >> flagBitsLog2).
+type flagSet [(lastID + flagMask) / flagBits]flags
+
+// inSet reports whether the given feature bit is set.
+func (s flagSet) inSet(feat FeatureID) bool {
+	return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
+}
+
+// set marks the given feature as present.
+func (s *flagSet) set(feat FeatureID) {
+	s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
+}
+
+// setIf will set a feature if boolean is true.
+// Several features may be set from one condition.
+func (s *flagSet) setIf(cond bool, features ...FeatureID) {
+	if cond {
+		for _, offset := range features {
+			s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
+		}
+	}
+}
+
+// unset clears the given feature bit.
+func (s *flagSet) unset(offset FeatureID) {
+	bit := flags(1 << (offset & flagMask))
+	s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
+}
+
+// or with another flagset (bitwise OR merge into s).
+func (s *flagSet) or(other flagSet) {
+	for i, v := range other[:] {
+		s[i] |= v
+	}
+}
+
+// hasSet returns whether all features are present.
+// Every bit set in other must also be set in s.
+func (s flagSet) hasSet(other flagSet) bool {
+	for i, v := range other[:] {
+		if s[i]&v != v {
+			return false
+		}
+	}
+	return true
+}
+
+// nEnabled will return the number of enabled flags.
+func (s flagSet) nEnabled() (n int) {
+	for _, v := range s[:] {
+		n += bits.OnesCount64(uint64(v))
+	}
+	return n
+}
+
+// flagSetWith returns a flagSet with the given features set.
+func flagSetWith(feat ...FeatureID) flagSet {
+	var res flagSet
+	for _, f := range feat {
+		res.set(f)
+	}
+	return res
+}
+
+// ParseFeature will parse the string and return the ID of the matching feature.
+// Will return UNKNOWN if not found.
+// Matching is case-insensitive; a linear scan is fine since this only parses
+// user-supplied input, never hot paths.
+func ParseFeature(s string) FeatureID {
+	s = strings.ToUpper(s)
+	for i := firstID; i < lastID; i++ {
+		if i.String() == s {
+			return i
+		}
+	}
+	return UNKNOWN
+}
+
+// Strings returns an array of the detected features for FlagsSet.
+func (s flagSet) Strings() []string {
+	// len of an array type is a compile-time constant, so this guard is
+	// effectively dead unless lastID is 0; kept for safety.
+	if len(s) == 0 {
+		return []string{""}
+	}
+	r := make([]string, 0)
+	for i := firstID; i < lastID; i++ {
+		if s.inSet(i) {
+			r = append(r, i.String())
+		}
+	}
+	return r
+}
+
+// maxExtendedFunction returns the highest supported extended CPUID leaf
+// (EAX of leaf 0x80000000).
+func maxExtendedFunction() uint32 {
+	eax, _, _, _ := cpuid(0x80000000)
+	return eax
+}
+
+// maxFunctionID returns the highest supported standard CPUID leaf
+// (EAX of leaf 0).
+func maxFunctionID() uint32 {
+	a, _, _, _ := cpuid(0)
+	return a
+}
+
+// brandName returns the CPU brand string from extended leaves
+// 0x80000002..0x80000004 (3 leaves x 4 registers = 48 bytes),
+// or "unknown" if those leaves are unsupported.
+func brandName() string {
+	if maxExtendedFunction() >= 0x80000004 {
+		v := make([]uint32, 0, 48)
+		for i := uint32(0); i < 3; i++ {
+			a, b, c, d := cpuid(0x80000002 + i)
+			v = append(v, a, b, c, d)
+		}
+		return strings.Trim(string(valAsString(v...)), " ")
+	}
+	return "unknown"
+}
+
+// threadsPerCore returns the number of hardware threads (SMT) per physical
+// core, defaulting to 1 when it cannot be determined.
+func threadsPerCore() int {
+	mfi := maxFunctionID()
+	vend, _ := vendorID()
+
+	if mfi < 0x4 || (vend != Intel && vend != AMD) {
+		return 1
+	}
+
+	if mfi < 0xb {
+		// No leaf 0xB: fall back to leaf 1/4 (Intel only).
+		if vend != Intel {
+			return 1
+		}
+		_, b, _, d := cpuid(1)
+		if (d & (1 << 28)) != 0 {
+			// v will contain logical core count
+			v := (b >> 16) & 255
+			if v > 1 {
+				a4, _, _, _ := cpuid(4)
+				// physical cores
+				v2 := (a4 >> 26) + 1
+				if v2 > 0 {
+					return int(v) / int(v2)
+				}
+			}
+		}
+		return 1
+	}
+	// Leaf 0xB sub-leaf 0: EBX[15:0] = logical processors at SMT level.
+	_, b, _, _ := cpuidex(0xb, 0)
+	if b&0xffff == 0 {
+		if vend == AMD {
+			// Workaround for AMD returning 0, assume 2 if >= Zen 2
+			// It will be more correct than not.
+			fam, _ := familyModel()
+			_, _, _, d := cpuid(1)
+			if (d&(1<<28)) != 0 && fam >= 23 {
+				return 2
+			}
+		}
+		return 1
+	}
+	return int(b & 0xffff)
+}
+
+// logicalCores returns the number of logical CPUs in the package, or 0 when
+// it cannot be determined for the detected vendor.
+func logicalCores() int {
+	mfi := maxFunctionID()
+	v, _ := vendorID()
+	switch v {
+	case Intel:
+		// Use this on old Intel processors
+		if mfi < 0xb {
+			if mfi < 1 {
+				return 0
+			}
+			// CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+			// that can be assigned to logical processors in a physical package.
+			// The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		// Leaf 0xB sub-leaf 1: EBX[15:0] = logical processors at core level.
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case AMD, Hygon:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+// familyModel returns the display family and model from leaf 1 EAX,
+// folding in the extended family (EAX[27:20]) and extended model
+// (EAX[19:16], shifted into the high nibble) fields.
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+// physicalCores returns the number of physical cores, derived from
+// logicalCores()/threadsPerCore(), with an AMD-specific fallback to
+// leaf 0x80000008 ECX[7:0]. Returns 0 when unknown.
+func physicalCores() int {
+	v, _ := vendorID()
+	switch v {
+	case Intel:
+		return logicalCores() / threadsPerCore()
+	case AMD, Hygon:
+		lc := logicalCores()
+		tpc := threadsPerCore()
+		if lc > 0 && tpc > 0 {
+			return lc / tpc
+		}
+
+		// The following is inaccurate on AMD EPYC 7742 64-Core Processor
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			if c&0xff > 0 {
+				return int(c&0xff) + 1
+			}
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+// Keys must be the exact 12-byte vendor string returned in EBX/EDX/ECX of
+// CPUID leaf 0 — note the two spaces in "Genuine  RDC".
+var vendorMapping = map[string]Vendor{
+	"AMDisbetter!": AMD,
+	"AuthenticAMD": AMD,
+	"CentaurHauls": VIA,
+	"GenuineIntel": Intel,
+	"TransmetaCPU": Transmeta,
+	"GenuineTMx86": Transmeta,
+	"Geode by NSC": NSC,
+	"VIA VIA VIA ": VIA,
+	"KVMKVMKVMKVM": KVM,
+	"Microsoft Hv": MSVM,
+	"VMwareVMware": VMware,
+	"XenVMMXenVMM": XenHVM,
+	"bhyve bhyve ": Bhyve,
+	"HygonGenuine": Hygon,
+	"Vortex86 SoC": SiS,
+	"SiS SiS SiS ": SiS,
+	"RiseRiseRise": SiS,
+	"Genuine  RDC": RDC,
+}
+
+// vendorID returns the detected Vendor and the raw 12-byte vendor string.
+// The vendor string is assembled in the architectural EBX, EDX, ECX order.
+func vendorID() (Vendor, string) {
+	_, b, c, d := cpuid(0)
+	v := string(valAsString(b, d, c))
+	vend, ok := vendorMapping[v]
+	if !ok {
+		return VendorUnknown, v
+	}
+	return vend, v
+}
+
+// cacheLine returns the cache line size in bytes, or 0 if unknown.
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	// CLFLUSH line size is EBX[15:8] in 8-byte units; (ebx&0xff00)>>5 is
+	// equivalent to ((ebx>>8)&0xff)*8.
+	cache := (ebx & 0xff00) >> 5 // cflush size
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff // cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+// cacheSize populates c.Cache with the L1 data/instruction, L2 and L3 cache
+// sizes in bytes. Values that cannot be determined are left at -1.
+// Intel uses leaf 4; AMD/Hygon use extended leaves 0x80000005/6 and, when
+// available, 0x8000001D.
+// Fix over upstream copy: in both "unified cache" fallbacks the L1D guard
+// previously assigned L1I, so a unified L1 never populated L1D.
+func (c *CPUInfo) cacheSize() {
+	c.Cache.L1D = -1
+	c.Cache.L1I = -1
+	c.Cache.L2 = -1
+	c.Cache.L3 = -1
+	vendor, _ := vendorID()
+	switch vendor {
+	case Intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.Cache.L1D = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.Cache.L1I = size
+				} else {
+					// Unified cache: fill whichever L1 slot is still unset.
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	case AMD, Hygon:
+		// Untested.
+		if maxExtendedFunction() < 0x80000005 {
+			return
+		}
+		_, _, ecx, edx := cpuid(0x80000005)
+		c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+		c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+		if maxExtendedFunction() < 0x80000006 {
+			return
+		}
+		_, _, ecx, _ = cpuid(0x80000006)
+		c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+
+		// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
+		if maxExtendedFunction() < 0x8000001D {
+			return
+		}
+
+		// Xen Hypervisor is buggy and returns the same entry no matter ECX value.
+		// Hack: When we encounter the same entry 100 times we break.
+		nSame := 0
+		var last uint32
+		for i := uint32(0); i < math.MaxUint32; i++ {
+			eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
+
+			level := (eax >> 5) & 7
+			cacheNumSets := ecx + 1
+			cacheLineSize := 1 + (ebx & 2047)
+			cachePhysPartitions := 1 + ((ebx >> 12) & 511)
+			cacheNumWays := 1 + ((ebx >> 22) & 511)
+
+			typ := eax & 15
+			size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays)
+			if typ == 0 {
+				return
+			}
+
+			// Check for the same value repeated.
+			comb := eax ^ ebx ^ ecx
+			if comb == last {
+				nSame++
+				if nSame == 100 {
+					return
+				}
+			}
+			last = comb
+
+			switch level {
+			case 1:
+				switch typ {
+				case 1:
+					// Data cache
+					c.Cache.L1D = size
+				case 2:
+					// Inst cache
+					c.Cache.L1I = size
+				default:
+					// Unified cache: fill whichever L1 slot is still unset.
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	}
+}
+
+// SGXEPCSection describes one SGX Enclave Page Cache (EPC) memory section.
+type SGXEPCSection struct {
+	BaseAddress uint64
+	EPCSize     uint64
+}
+
+// SGXSupport describes the detected Intel SGX capabilities of the CPU.
+type SGXSupport struct {
+	Available           bool
+	LaunchControl       bool
+	SGX1Supported       bool
+	SGX2Supported       bool
+	MaxEnclaveSizeNot64 int64
+	MaxEnclaveSize64    int64
+	EPCSections         []SGXEPCSection
+}
+
+// hasSGX queries CPUID leaf 0x12 for SGX capabilities. available/lc are the
+// already-detected SGX and SGX Launch Control feature bits from leaf 7.
+func hasSGX(available, lc bool) (rval SGXSupport) {
+	rval.Available = available
+
+	if !available {
+		return
+	}
+
+	rval.LaunchControl = lc
+
+	// Sub-leaf 0: SGX1/SGX2 support and maximum enclave sizes.
+	a, _, _, d := cpuidex(0x12, 0)
+	rval.SGX1Supported = a&0x01 != 0
+	rval.SGX2Supported = a&0x02 != 0
+	rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF)      // pow 2
+	rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+	rval.EPCSections = make([]SGXEPCSection, 0)
+
+	// Sub-leaves 2..9 enumerate EPC sections until an invalid (type 0) entry.
+	for subleaf := uint32(2); subleaf < 2+8; subleaf++ {
+		eax, ebx, ecx, edx := cpuidex(0x12, subleaf)
+		leafType := eax & 0xf
+
+		if leafType == 0 {
+			// Invalid subleaf, stop iterating
+			break
+		} else if leafType == 1 {
+			// EPC Section subleaf
+			baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32)
+			size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32)
+
+			section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size}
+			rval.EPCSections = append(rval.EPCSections, section)
+		}
+	}
+
+	return
+}
+
+// support builds the full feature flagSet by probing the standard CPUID
+// leaves (1, 7, 0xD) and the extended leaves (0x80000001 and up).
+func support() flagSet {
+	var fs flagSet
+	mfi := maxFunctionID()
+	vend, _ := vendorID()
+	if mfi < 0x1 {
+		return fs
+	}
+	family, model := familyModel()
+
+	// Leaf 1: baseline feature bits in ECX/EDX.
+	_, _, c, d := cpuid(1)
+	fs.setIf((d&(1<<0)) != 0, X87)
+	fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
+	fs.setIf((d&(1<<11)) != 0, SCE)
+	fs.setIf((d&(1<<15)) != 0, CMOV)
+	fs.setIf((d&(1<<22)) != 0, MMXEXT)
+	fs.setIf((d&(1<<23)) != 0, MMX)
+	fs.setIf((d&(1<<24)) != 0, FXSR)
+	fs.setIf((d&(1<<25)) != 0, FXSROPT)
+	fs.setIf((d&(1<<26)) != 0, SSE2)
+	fs.setIf((d&(1<<25)) != 0, SSE)
+	fs.setIf((c&1) != 0, SSE3)
+	fs.setIf((c&(1<<5)) != 0, VMX)
+	fs.setIf((c&0x00000200) != 0, SSSE3)
+	fs.setIf((c&0x00080000) != 0, SSE4)
+	fs.setIf((c&0x00100000) != 0, SSE42)
+	fs.setIf((c&(1<<25)) != 0, AESNI)
+	fs.setIf((c&(1<<1)) != 0, CLMUL)
+	fs.setIf(c&(1<<22) != 0, MOVBE)
+	fs.setIf(c&(1<<23) != 0, POPCNT)
+	fs.setIf(c&(1<<30) != 0, RDRAND)
+
+	// This bit has been reserved by Intel & AMD for use by hypervisors,
+	// and indicates the presence of a hypervisor.
+	fs.setIf(c&(1<<31) != 0, HYPERVISOR)
+	fs.setIf(c&(1<<29) != 0, F16C)
+	fs.setIf(c&(1<<13) != 0, CX16)
+
+	// NOTE(review): these two branches are identical except for the vendor
+	// test and could be merged; kept as upstream has them.
+	if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
+		fs.setIf(threadsPerCore() > 1, HTT)
+	}
+	if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 {
+		fs.setIf(threadsPerCore() > 1, HTT)
+	}
+	fs.setIf(c&1<<26 != 0, XSAVE)
+	fs.setIf(c&1<<27 != 0, OSXSAVE)
+	// Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits
+	const avxCheck = 1<<26 | 1<<27 | 1<<28
+	if c&avxCheck == avxCheck {
+		// Check for OS support
+		eax, _ := xgetbv(0)
+		if (eax & 0x6) == 0x6 {
+			fs.set(AVX)
+			switch vend {
+			case Intel:
+				// Older than Haswell.
+				fs.setIf(family == 6 && model < 60, AVXSLOW)
+			case AMD:
+				// Older than Zen 2
+				fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW)
+			}
+		}
+	}
+	// FMA3 can be used with SSE registers, so no OS support is strictly needed.
+	// fma3 and OSXSAVE needed.
+	const fma3Check = 1<<12 | 1<<27
+	fs.setIf(c&fma3Check == fma3Check, FMA3)
+
+	// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
+	if mfi >= 7 {
+		_, ebx, ecx, edx := cpuidex(7, 0)
+		if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
+			fs.set(AVX2)
+		}
+		// CPUID.(EAX=7, ECX=0).EBX
+		if (ebx & 0x00000008) != 0 {
+			fs.set(BMI1)
+			fs.setIf((ebx&0x00000100) != 0, BMI2)
+		}
+		fs.setIf(ebx&(1<<2) != 0, SGX)
+		fs.setIf(ebx&(1<<4) != 0, HLE)
+		fs.setIf(ebx&(1<<9) != 0, ERMS)
+		fs.setIf(ebx&(1<<11) != 0, RTM)
+		fs.setIf(ebx&(1<<14) != 0, MPX)
+		fs.setIf(ebx&(1<<18) != 0, RDSEED)
+		fs.setIf(ebx&(1<<19) != 0, ADX)
+		fs.setIf(ebx&(1<<29) != 0, SHA)
+
+		// CPUID.(EAX=7, ECX=0).ECX
+		fs.setIf(ecx&(1<<5) != 0, WAITPKG)
+		fs.setIf(ecx&(1<<7) != 0, CETSS)
+		fs.setIf(ecx&(1<<8) != 0, GFNI)
+		fs.setIf(ecx&(1<<9) != 0, VAES)
+		fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
+		fs.setIf(ecx&(1<<13) != 0, TME)
+		fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
+		fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
+		fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
+		fs.setIf(ecx&(1<<29) != 0, ENQCMD)
+		fs.setIf(ecx&(1<<30) != 0, SGXLC)
+
+		// CPUID.(EAX=7, ECX=0).EDX
+		fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
+		fs.setIf(edx&(1<<14) != 0, SERIALIZE)
+		fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
+		fs.setIf(edx&(1<<18) != 0, PCONFIG)
+		fs.setIf(edx&(1<<20) != 0, CETIBT)
+		fs.setIf(edx&(1<<26) != 0, IBPB)
+		fs.setIf(edx&(1<<27) != 0, STIBP)
+
+		// CPUID.(EAX=7, ECX=1)
+		eax1, _, _, _ := cpuidex(7, 1)
+		fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
+		fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
+		fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
+		fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
+		fs.setIf(eax1&(1<<22) != 0, HRESET)
+		fs.setIf(eax1&(1<<26) != 0, LAM)
+
+		// Only detect AVX-512 features if XGETBV is supported
+		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+			// Check for OS support
+			eax, _ := xgetbv(0)
+
+			// Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+			// ZMM16-ZMM31 state are enabled by OS)
+			/// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
+			hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3
+			if runtime.GOOS == "darwin" {
+				hasAVX512 = fs.inSet(AVX) && darwinHasAVX512()
+			}
+			if hasAVX512 {
+				fs.setIf(ebx&(1<<16) != 0, AVX512F)
+				fs.setIf(ebx&(1<<17) != 0, AVX512DQ)
+				fs.setIf(ebx&(1<<21) != 0, AVX512IFMA)
+				fs.setIf(ebx&(1<<26) != 0, AVX512PF)
+				fs.setIf(ebx&(1<<27) != 0, AVX512ER)
+				fs.setIf(ebx&(1<<28) != 0, AVX512CD)
+				fs.setIf(ebx&(1<<30) != 0, AVX512BW)
+				fs.setIf(ebx&(1<<31) != 0, AVX512VL)
+				// ecx
+				fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
+				fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
+				fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
+				fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
+				fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ)
+				// edx
+				fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT)
+				fs.setIf(edx&(1<<22) != 0, AMXBF16)
+				fs.setIf(edx&(1<<23) != 0, AVX512FP16)
+				fs.setIf(edx&(1<<24) != 0, AMXTILE)
+				fs.setIf(edx&(1<<25) != 0, AMXINT8)
+				// eax1 = CPUID.(EAX=7, ECX=1).EAX
+				fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
+			}
+		}
+	}
+	// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
+	// EAX
+	// Bit 00: XSAVEOPT is available.
+	// Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set.
+	// Bit 02: Supports XGETBV with ECX = 1 if set.
+	// Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set.
+	// Bits 31 - 04: Reserved.
+	// EBX
+	// Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS.
+	// ECX
+	// Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1.
+	// EDX?
+	// Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved.
+	if mfi >= 0xd {
+		if fs.inSet(XSAVE) {
+			eax, _, _, _ := cpuidex(0xd, 1)
+			fs.setIf(eax&(1<<0) != 0, XSAVEOPT)
+			fs.setIf(eax&(1<<1) != 0, XSAVEC)
+			fs.setIf(eax&(1<<2) != 0, XGETBV1)
+			fs.setIf(eax&(1<<3) != 0, XSAVES)
+		}
+	}
+	if maxExtendedFunction() >= 0x80000001 {
+		_, _, c, d := cpuid(0x80000001)
+		if (c & (1 << 5)) != 0 {
+			fs.set(LZCNT)
+			fs.set(POPCNT)
+		}
+		// ECX
+		fs.setIf((c&(1<<0)) != 0, LAHF)
+		fs.setIf((c&(1<<2)) != 0, SVM)
+		fs.setIf((c&(1<<6)) != 0, SSE4A)
+		fs.setIf((c&(1<<10)) != 0, IBS)
+
+		// EDX
+		fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
+		fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
+		fs.setIf((d&(1<<23)) != 0, MMX)
+		fs.setIf((d&(1<<22)) != 0, MMXEXT)
+		fs.setIf(d&(1<<20) != 0, NX)
+		fs.setIf(d&(1<<27) != 0, RDTSCP)
+
+		/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+		 * used unless the OS has AVX support. */
+		if fs.inSet(AVX) {
+			fs.setIf((c&0x00000800) != 0, XOP)
+			fs.setIf((c&0x00010000) != 0, FMA4)
+		}
+
+	}
+	if maxExtendedFunction() >= 0x80000007 {
+		_, b, _, d := cpuid(0x80000007)
+		fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW)
+		fs.setIf((b&(1<<1)) != 0, SUCCOR)
+		fs.setIf((b&(1<<2)) != 0, HWA)
+		fs.setIf((d&(1<<9)) != 0, CPBOOST)
+	}
+
+	if maxExtendedFunction() >= 0x80000008 {
+		_, b, _, _ := cpuid(0x80000008)
+		fs.setIf((b&(1<<9)) != 0, WBNOINVD)
+		fs.setIf((b&(1<<8)) != 0, MCOMMIT)
+		fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
+		fs.setIf((b&(1<<4)) != 0, RDPRU)
+		fs.setIf((b&(1<<3)) != 0, INVLPGB)
+		fs.setIf((b&(1<<1)) != 0, MSRIRC)
+		fs.setIf((b&(1<<0)) != 0, CLZERO)
+	}
+
+	// SVM sub-features (AMD virtualization).
+	if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A {
+		_, _, _, edx := cpuid(0x8000000A)
+		fs.setIf((edx>>0)&1 == 1, SVMNP)
+		fs.setIf((edx>>1)&1 == 1, LBRVIRT)
+		fs.setIf((edx>>2)&1 == 1, SVML)
+		fs.setIf((edx>>3)&1 == 1, NRIPS)
+		fs.setIf((edx>>4)&1 == 1, TSCRATEMSR)
+		fs.setIf((edx>>5)&1 == 1, VMCBCLEAN)
+		fs.setIf((edx>>6)&1 == 1, SVMFBASID)
+		fs.setIf((edx>>7)&1 == 1, SVMDA)
+		fs.setIf((edx>>10)&1 == 1, SVMPF)
+		fs.setIf((edx>>12)&1 == 1, SVMPFT)
+	}
+
+	// IBS (Instruction Based Sampling) sub-features.
+	if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
+		eax, _, _, _ := cpuid(0x8000001b)
+		fs.setIf((eax>>0)&1 == 1, IBSFFV)
+		fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM)
+		fs.setIf((eax>>2)&1 == 1, IBSOPSAM)
+		fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT)
+		fs.setIf((eax>>4)&1 == 1, IBSOPCNT)
+		fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
+		fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
+		fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
+	}
+
+	// AMD memory encryption features (SME/SEV).
+	if maxExtendedFunction() >= 0x8000001f && vend == AMD {
+		a, _, _, _ := cpuid(0x8000001f)
+		fs.setIf((a>>0)&1 == 1, SME)
+		fs.setIf((a>>1)&1 == 1, SEV)
+		fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH)
+		fs.setIf((a>>3)&1 == 1, SEV_ES)
+		fs.setIf((a>>4)&1 == 1, SEV_SNP)
+		fs.setIf((a>>5)&1 == 1, VMPL)
+		fs.setIf((a>>10)&1 == 1, SME_COHERENT)
+		fs.setIf((a>>11)&1 == 1, SEV_64BIT)
+		fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED)
+		fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE)
+		fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP)
+		fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST)
+		fs.setIf((a>>16)&1 == 1, VTE)
+		fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
+	}
+
+	return fs
+}
+
+// valAsString converts CPUID register values to their little-endian byte
+// representation, truncating the result at the first NUL byte.
+func valAsString(values ...uint32) []byte {
+	r := make([]byte, 4*len(values))
+	for i, v := range values {
+		dst := r[i*4:]
+		dst[0] = byte(v & 0xff)
+		dst[1] = byte((v >> 8) & 0xff)
+		dst[2] = byte((v >> 16) & 0xff)
+		dst[3] = byte((v >> 24) & 0xff)
+		switch {
+		case dst[0] == 0:
+			return r[:i*4]
+		case dst[1] == 0:
+			return r[:i*4+1]
+		case dst[2] == 0:
+			return r[:i*4+2]
+		case dst[3] == 0:
+			return r[:i*4+3]
+		}
+	}
+	return r
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
new file mode 100644
index 000000000..8587c3a1f
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s
@@ -0,0 +1,47 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build 386,!gccgo,!noasm,!appengine
+
+// TEXT flag 7 = NOPROF|DUPOK|NOSPLIT (textflag.h values 1|2|4).
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	// Clear ECX so leaves that consume a sub-leaf index behave deterministically.
+	XORL CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+4(FP)
+	MOVL BX, ebx+8(FP)
+	MOVL CX, ecx+12(FP)
+	MOVL DX, edx+16(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+4(FP)
+	MOVL DX, edx+8(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET
+
+// func asmDarwinHasAVX512() bool
+// Always false on 386; the commpage probe only exists for darwin/amd64.
+TEXT ·asmDarwinHasAVX512(SB), 7, $0
+	MOVL $0, eax+0(FP)
+	RET
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
new file mode 100644
index 000000000..bc11f8942
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s
@@ -0,0 +1,72 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build amd64,!gccgo,!noasm,!appengine
+
+// TEXT flag 7 = NOPROF|DUPOK|NOSPLIT (textflag.h values 1|2|4).
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	// Clear ECX so leaves that consume a sub-leaf index behave deterministically.
+	XORQ CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+8(FP)
+	MOVL DX, edx+12(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET
+
+// From https://go-review.googlesource.com/c/sys/+/285572/
+// func asmDarwinHasAVX512() bool
+// Probes the darwin commpage for the AVX512F capability bit; compiles to a
+// plain "return false" on every other GOOS/GOARCH combination.
+TEXT ·asmDarwinHasAVX512(SB), 7, $0-1
+	MOVB $0, ret+0(FP) // default to false
+
+#ifdef GOOS_darwin   // return if not darwin
+#ifdef GOARCH_amd64  // return if not amd64
+// These values from:
+// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
+#define commpage64_base_address         0x00007fffffe00000
+#define commpage64_cpu_capabilities64   (commpage64_base_address+0x010)
+#define commpage64_version              (commpage64_base_address+0x01E)
+#define hasAVX512F                      0x0000004000000000
+	MOVQ $commpage64_version, BX
+	MOVW (BX), AX
+	CMPW AX, $13 // versions < 13 do not support AVX512
+	JL   no_avx512
+	MOVQ $commpage64_cpu_capabilities64, BX
+	MOVQ (BX), AX
+	MOVQ $hasAVX512F, CX
+	ANDQ CX, AX
+	JZ   no_avx512
+	MOVB $1, ret+0(FP)
+
+no_avx512:
+#endif
+#endif
+	RET
+
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
new file mode 100644
index 000000000..b31d6aec4
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s
@@ -0,0 +1,26 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build arm64,!gccgo,!noasm,!appengine
+
+// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
+// Each routine reads one system register via a raw MRS encoding (the Go
+// assembler predates mnemonic support for these) and stores it to the
+// single uint64 return value.
+
+// func getMidr
+TEXT ·getMidr(SB), 7, $0
+	WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
+	MOVD R0, midr+0(FP)
+	RET
+
+// func getProcFeatures
+TEXT ·getProcFeatures(SB), 7, $0
+	WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
+	MOVD R0, procFeatures+0(FP)
+	RET
+
+// func getInstAttributes
+TEXT ·getInstAttributes(SB), 7, $0
+	WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
+	WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
+	MOVD R0, instAttrReg0+0(FP)
+	MOVD R1, instAttrReg1+8(FP)
+	RET
+
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
new file mode 100644
index 000000000..9a53504a0
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
@@ -0,0 +1,247 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build arm64 && !gccgo && !noasm && !appengine
+// +build arm64,!gccgo,!noasm,!appengine
+
+package cpuid
+
+import "runtime"
+
+func getMidr() (midr uint64)
+func getProcFeatures() (procFeatures uint64)
+func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
+
+// initCPU stubs out the x86 cpuid helpers, which do not exist on arm64;
+// feature detection instead reads ARM system registers in addInfo.
+func initCPU() {
+	cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+	cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+	xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
+	rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
+}
+
+// addInfo fills in arm64 CPU information by decoding MIDR_EL1,
+// ID_AA64PFR0_EL1, ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1. Reading these
+// registers traps unless the OS emulates the access, so it is skipped in
+// safe mode unless the OS is known to intercept it.
+func addInfo(c *CPUInfo, safe bool) {
+	// Seems to be safe to assume on ARM64
+	c.CacheLine = 64
+	detectOS(c)
+
+	// ARM64 disabled since it may crash if interrupt is not intercepted by OS.
+	if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" {
+		return
+	}
+	midr := getMidr()
+
+	// MIDR_EL1 - Main ID Register
+	// https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1
+	// x--------------------------------------------------x
+	// | Name                         | bits   | visible |
+	// |--------------------------------------------------|
+	// | Implementer                  | [31-24] | y      |
+	// |--------------------------------------------------|
+	// | Variant                      | [23-20] | y      |
+	// |--------------------------------------------------|
+	// | Architecture                 | [19-16] | y      |
+	// |--------------------------------------------------|
+	// | PartNum                      | [15-4]  | y      |
+	// |--------------------------------------------------|
+	// | Revision                     | [3-0]   | y      |
+	// x--------------------------------------------------x
+
+	// Implementer codes from the MIDR_EL1 documentation.
+	switch (midr >> 24) & 0xff {
+	case 0xC0:
+		c.VendorString = "Ampere Computing"
+		c.VendorID = Ampere
+	case 0x41:
+		c.VendorString = "Arm Limited"
+		c.VendorID = ARM
+	case 0x42:
+		c.VendorString = "Broadcom Corporation"
+		c.VendorID = Broadcom
+	case 0x43:
+		c.VendorString = "Cavium Inc"
+		c.VendorID = Cavium
+	case 0x44:
+		c.VendorString = "Digital Equipment Corporation"
+		c.VendorID = DEC
+	case 0x46:
+		c.VendorString = "Fujitsu Ltd"
+		c.VendorID = Fujitsu
+	case 0x49:
+		c.VendorString = "Infineon Technologies AG"
+		c.VendorID = Infineon
+	case 0x4D:
+		c.VendorString = "Motorola or Freescale Semiconductor Inc"
+		c.VendorID = Motorola
+	case 0x4E:
+		c.VendorString = "NVIDIA Corporation"
+		c.VendorID = NVIDIA
+	case 0x50:
+		c.VendorString = "Applied Micro Circuits Corporation"
+		c.VendorID = AMCC
+	case 0x51:
+		c.VendorString = "Qualcomm Inc"
+		c.VendorID = Qualcomm
+	case 0x56:
+		c.VendorString = "Marvell International Ltd"
+		c.VendorID = Marvell
+	case 0x69:
+		c.VendorString = "Intel Corporation"
+		c.VendorID = Intel
+	}
+
+	// Lower 4 bits: Architecture
+	// Architecture Meaning
+	// 0b0001 Armv4.
+	// 0b0010 Armv4T.
+	// 0b0011 Armv5 (obsolete).
+	// 0b0100 Armv5T.
+	// 0b0101 Armv5TE.
+	// 0b0110 Armv5TEJ.
+	// 0b0111 Armv6.
+	// 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'.
+	// Upper 4 bit: Variant
+	// An IMPLEMENTATION DEFINED variant number.
+	// Typically, this field is used to distinguish between different product variants, or major revisions of a product.
+	// NOTE(review): takes bits [23:16] (Variant+Architecture combined), not
+	// just the 4-bit Architecture field described above.
+	c.Family = int(midr>>16) & 0xff
+
+	// PartNum, bits [15:4]
+	// An IMPLEMENTATION DEFINED primary part number for the device.
+	// On processors implemented by Arm, if the top four bits of the primary
+	// part number are 0x0 or 0x7, the variant and architecture are encoded differently.
+	// Revision, bits [3:0]
+	// An IMPLEMENTATION DEFINED revision number for the device.
+	c.Model = int(midr) & 0xffff
+
+	procFeatures := getProcFeatures()
+
+	// ID_AA64PFR0_EL1 - Processor Feature Register 0
+	// x--------------------------------------------------x
+	// | Name                         | bits   | visible |
+	// |--------------------------------------------------|
+	// | DIT                          | [51-48] | y      |
+	// |--------------------------------------------------|
+	// | SVE                          | [35-32] | y      |
+	// |--------------------------------------------------|
+	// | GIC                          | [27-24] | n      |
+	// |--------------------------------------------------|
+	// | AdvSIMD                      | [23-20] | y      |
+	// |--------------------------------------------------|
+	// | FP                           | [19-16] | y      |
+	// |--------------------------------------------------|
+	// | EL3                          | [15-12] | n      |
+	// |--------------------------------------------------|
+	// | EL2                          | [11-8]  | n      |
+	// |--------------------------------------------------|
+	// | EL1                          | [7-4]   | n      |
+	// |--------------------------------------------------|
+	// | EL0                          | [3-0]   | n      |
+	// x--------------------------------------------------x
+
+	var f flagSet
+	// if procFeatures&(0xf<<48) != 0 {
+	// 	fmt.Println("DIT")
+	// }
+	f.setIf(procFeatures&(0xf<<32) != 0, SVE)
+	// AdvSIMD field value 0b1111 (all bits set) means "not implemented".
+	if procFeatures&(0xf<<20) != 15<<20 {
+		f.set(ASIMD)
+		// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
+		// 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
+		f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP)
+	}
+	f.setIf(procFeatures&(0xf<<16) != 0, FP)
+
+	instAttrReg0, instAttrReg1 := getInstAttributes()
+
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+	//
+	// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
+	// x--------------------------------------------------x
+	// | Name                         | bits   | visible |
+	// |--------------------------------------------------|
+	// | TS                           | [55-52] | y      |
+	// |--------------------------------------------------|
+	// | FHM                          | [51-48] | y      |
+	// |--------------------------------------------------|
+	// | DP                           | [47-44] | y      |
+	// |--------------------------------------------------|
+	// | SM4                          | [43-40] | y      |
+	// |--------------------------------------------------|
+	// | SM3                          | [39-36] | y      |
+	// |--------------------------------------------------|
+	// | SHA3                         | [35-32] | y      |
+	// |--------------------------------------------------|
+	// | RDM                          | [31-28] | y      |
+	// |--------------------------------------------------|
+	// | ATOMICS                      | [23-20] | y      |
+	// |--------------------------------------------------|
+	// | CRC32                        | [19-16] | y      |
+	// |--------------------------------------------------|
+	// | SHA2                         | [15-12] | y      |
+	// |--------------------------------------------------|
+	// | SHA1                         | [11-8]  | y      |
+	// |--------------------------------------------------|
+	// | AES                          | [7-4]   | y      |
+	// x--------------------------------------------------x
+
+	// if instAttrReg0&(0xf<<52) != 0 {
+	// 	fmt.Println("TS")
+	// }
+	// if instAttrReg0&(0xf<<48) != 0 {
+	// 	fmt.Println("FHM")
+	// }
+	f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP)
+	f.setIf(instAttrReg0&(0xf<<40) != 0, SM4)
+	f.setIf(instAttrReg0&(0xf<<36) != 0, SM3)
+	f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3)
+	f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM)
+	f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS)
+	f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32)
+	f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2)
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+	// 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
+	f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512)
+	f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1)
+	f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM)
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+	// 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
+	f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL)
+
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
+	//
+	// ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+	// x--------------------------------------------------x
+	// | Name                         | bits   | visible |
+	// |--------------------------------------------------|
+	// | GPI                          | [31-28] | y      |
+	// |--------------------------------------------------|
+	// | GPA                          | [27-24] | y      |
+	// |--------------------------------------------------|
+	// | LRCPC                        | [23-20] | y      |
+	// |--------------------------------------------------|
+	// | FCMA                         | [19-16] | y      |
+	// |--------------------------------------------------|
+	// | JSCVT                        | [15-12] | y      |
+	// |--------------------------------------------------|
+	// | API                          | [11-8]  | y      |
+	// |--------------------------------------------------|
+	// | APA                          | [7-4]   | y      |
+	// |--------------------------------------------------|
+	// | DPB                          | [3-0]   | y      |
+	// x--------------------------------------------------x
+
+	// if instAttrReg1&(0xf<<28) != 0 {
+	// 	fmt.Println("GPI")
+	// }
+	// NOTE(review): suspicious — GPA is documented above as bits [27:24], yet
+	// this masks the GPI range [31:28] and compares against decimal 24.
+	// Kept byte-identical to upstream; verify against the cpuid repository.
+	f.setIf(instAttrReg1&(0xf<<28) != 24, GPA)
+	f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC)
+	f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA)
+	f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT)
+	// if instAttrReg1&(0xf<<8) != 0 {
+	// 	fmt.Println("API")
+	// }
+	// if instAttrReg1&(0xf<<4) != 0 {
+	// 	fmt.Println("APA")
+	// }
+	f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP)
+
+	// Store
+	c.featureSet.or(f)
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
new file mode 100644
index 000000000..9636c2bc1
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine
+// +build !amd64,!386,!arm64 gccgo noasm appengine
+
+package cpuid
+
+func initCPU() {
+ cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+ xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
+ rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
+}
+
+func addInfo(info *CPUInfo, safe bool) {}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
new file mode 100644
index 000000000..35678d8a3
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine)
+// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
+
+package cpuid
+
+func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+func asmXgetbv(index uint32) (eax, edx uint32)
+func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+func asmDarwinHasAVX512() bool
+
+func initCPU() {
+ cpuid = asmCpuid
+ cpuidex = asmCpuidex
+ xgetbv = asmXgetbv
+ rdtscpAsm = asmRdtscpAsm
+ darwinHasAVX512 = asmDarwinHasAVX512
+}
+
+func addInfo(c *CPUInfo, safe bool) {
+ c.maxFunc = maxFunctionID()
+ c.maxExFunc = maxExtendedFunction()
+ c.BrandName = brandName()
+ c.CacheLine = cacheLine()
+ c.Family, c.Model = familyModel()
+ c.featureSet = support()
+ c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
+ c.ThreadsPerCore = threadsPerCore()
+ c.LogicalCores = logicalCores()
+ c.PhysicalCores = physicalCores()
+ c.VendorID, c.VendorString = vendorID()
+ c.cacheSize()
+ c.frequencies()
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
new file mode 100644
index 000000000..a9b3e36c7
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -0,0 +1,233 @@
+// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT.
+
+package cpuid
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ADX-1]
+ _ = x[AESNI-2]
+ _ = x[AMD3DNOW-3]
+ _ = x[AMD3DNOWEXT-4]
+ _ = x[AMXBF16-5]
+ _ = x[AMXINT8-6]
+ _ = x[AMXTILE-7]
+ _ = x[AVX-8]
+ _ = x[AVX2-9]
+ _ = x[AVX512BF16-10]
+ _ = x[AVX512BITALG-11]
+ _ = x[AVX512BW-12]
+ _ = x[AVX512CD-13]
+ _ = x[AVX512DQ-14]
+ _ = x[AVX512ER-15]
+ _ = x[AVX512F-16]
+ _ = x[AVX512FP16-17]
+ _ = x[AVX512IFMA-18]
+ _ = x[AVX512PF-19]
+ _ = x[AVX512VBMI-20]
+ _ = x[AVX512VBMI2-21]
+ _ = x[AVX512VL-22]
+ _ = x[AVX512VNNI-23]
+ _ = x[AVX512VP2INTERSECT-24]
+ _ = x[AVX512VPOPCNTDQ-25]
+ _ = x[AVXSLOW-26]
+ _ = x[AVXVNNI-27]
+ _ = x[BMI1-28]
+ _ = x[BMI2-29]
+ _ = x[CETIBT-30]
+ _ = x[CETSS-31]
+ _ = x[CLDEMOTE-32]
+ _ = x[CLMUL-33]
+ _ = x[CLZERO-34]
+ _ = x[CMOV-35]
+ _ = x[CMPSB_SCADBS_SHORT-36]
+ _ = x[CMPXCHG8-37]
+ _ = x[CPBOOST-38]
+ _ = x[CX16-39]
+ _ = x[ENQCMD-40]
+ _ = x[ERMS-41]
+ _ = x[F16C-42]
+ _ = x[FMA3-43]
+ _ = x[FMA4-44]
+ _ = x[FXSR-45]
+ _ = x[FXSROPT-46]
+ _ = x[GFNI-47]
+ _ = x[HLE-48]
+ _ = x[HRESET-49]
+ _ = x[HTT-50]
+ _ = x[HWA-51]
+ _ = x[HYPERVISOR-52]
+ _ = x[IBPB-53]
+ _ = x[IBS-54]
+ _ = x[IBSBRNTRGT-55]
+ _ = x[IBSFETCHSAM-56]
+ _ = x[IBSFFV-57]
+ _ = x[IBSOPCNT-58]
+ _ = x[IBSOPCNTEXT-59]
+ _ = x[IBSOPSAM-60]
+ _ = x[IBSRDWROPCNT-61]
+ _ = x[IBSRIPINVALIDCHK-62]
+ _ = x[IBS_PREVENTHOST-63]
+ _ = x[INT_WBINVD-64]
+ _ = x[INVLPGB-65]
+ _ = x[LAHF-66]
+ _ = x[LAM-67]
+ _ = x[LBRVIRT-68]
+ _ = x[LZCNT-69]
+ _ = x[MCAOVERFLOW-70]
+ _ = x[MCOMMIT-71]
+ _ = x[MMX-72]
+ _ = x[MMXEXT-73]
+ _ = x[MOVBE-74]
+ _ = x[MOVDIR64B-75]
+ _ = x[MOVDIRI-76]
+ _ = x[MOVSB_ZL-77]
+ _ = x[MPX-78]
+ _ = x[MSRIRC-79]
+ _ = x[MSR_PAGEFLUSH-80]
+ _ = x[NRIPS-81]
+ _ = x[NX-82]
+ _ = x[OSXSAVE-83]
+ _ = x[PCONFIG-84]
+ _ = x[POPCNT-85]
+ _ = x[RDPRU-86]
+ _ = x[RDRAND-87]
+ _ = x[RDSEED-88]
+ _ = x[RDTSCP-89]
+ _ = x[RTM-90]
+ _ = x[RTM_ALWAYS_ABORT-91]
+ _ = x[SCE-92]
+ _ = x[SERIALIZE-93]
+ _ = x[SEV-94]
+ _ = x[SEV_64BIT-95]
+ _ = x[SEV_ALTERNATIVE-96]
+ _ = x[SEV_DEBUGSWAP-97]
+ _ = x[SEV_ES-98]
+ _ = x[SEV_RESTRICTED-99]
+ _ = x[SEV_SNP-100]
+ _ = x[SGX-101]
+ _ = x[SGXLC-102]
+ _ = x[SHA-103]
+ _ = x[SME-104]
+ _ = x[SME_COHERENT-105]
+ _ = x[SSE-106]
+ _ = x[SSE2-107]
+ _ = x[SSE3-108]
+ _ = x[SSE4-109]
+ _ = x[SSE42-110]
+ _ = x[SSE4A-111]
+ _ = x[SSSE3-112]
+ _ = x[STIBP-113]
+ _ = x[STOSB_SHORT-114]
+ _ = x[SUCCOR-115]
+ _ = x[SVM-116]
+ _ = x[SVMDA-117]
+ _ = x[SVMFBASID-118]
+ _ = x[SVML-119]
+ _ = x[SVMNP-120]
+ _ = x[SVMPF-121]
+ _ = x[SVMPFT-122]
+ _ = x[TBM-123]
+ _ = x[TME-124]
+ _ = x[TSCRATEMSR-125]
+ _ = x[TSXLDTRK-126]
+ _ = x[VAES-127]
+ _ = x[VMCBCLEAN-128]
+ _ = x[VMPL-129]
+ _ = x[VMSA_REGPROT-130]
+ _ = x[VMX-131]
+ _ = x[VPCLMULQDQ-132]
+ _ = x[VTE-133]
+ _ = x[WAITPKG-134]
+ _ = x[WBNOINVD-135]
+ _ = x[X87-136]
+ _ = x[XGETBV1-137]
+ _ = x[XOP-138]
+ _ = x[XSAVE-139]
+ _ = x[XSAVEC-140]
+ _ = x[XSAVEOPT-141]
+ _ = x[XSAVES-142]
+ _ = x[AESARM-143]
+ _ = x[ARMCPUID-144]
+ _ = x[ASIMD-145]
+ _ = x[ASIMDDP-146]
+ _ = x[ASIMDHP-147]
+ _ = x[ASIMDRDM-148]
+ _ = x[ATOMICS-149]
+ _ = x[CRC32-150]
+ _ = x[DCPOP-151]
+ _ = x[EVTSTRM-152]
+ _ = x[FCMA-153]
+ _ = x[FP-154]
+ _ = x[FPHP-155]
+ _ = x[GPA-156]
+ _ = x[JSCVT-157]
+ _ = x[LRCPC-158]
+ _ = x[PMULL-159]
+ _ = x[SHA1-160]
+ _ = x[SHA2-161]
+ _ = x[SHA3-162]
+ _ = x[SHA512-163]
+ _ = x[SM3-164]
+ _ = x[SM4-165]
+ _ = x[SVE-166]
+ _ = x[lastID-167]
+ _ = x[firstID-0]
+}
+
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107}
+
+func (i FeatureID) String() string {
+ if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
+ return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VendorUnknown-0]
+ _ = x[Intel-1]
+ _ = x[AMD-2]
+ _ = x[VIA-3]
+ _ = x[Transmeta-4]
+ _ = x[NSC-5]
+ _ = x[KVM-6]
+ _ = x[MSVM-7]
+ _ = x[VMware-8]
+ _ = x[XenHVM-9]
+ _ = x[Bhyve-10]
+ _ = x[Hygon-11]
+ _ = x[SiS-12]
+ _ = x[RDC-13]
+ _ = x[Ampere-14]
+ _ = x[ARM-15]
+ _ = x[Broadcom-16]
+ _ = x[Cavium-17]
+ _ = x[DEC-18]
+ _ = x[Fujitsu-19]
+ _ = x[Infineon-20]
+ _ = x[Motorola-21]
+ _ = x[NVIDIA-22]
+ _ = x[AMCC-23]
+ _ = x[Qualcomm-24]
+ _ = x[Marvell-25]
+ _ = x[lastVendor-26]
+}
+
+const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor"
+
+var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155}
+
+func (i Vendor) String() string {
+ if i < 0 || i >= Vendor(len(_Vendor_index)-1) {
+ return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]]
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
new file mode 100644
index 000000000..d91d02109
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+package cpuid
+
+import (
+ "runtime"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+func detectOS(c *CPUInfo) bool {
+ if runtime.GOOS != "ios" {
+ tryToFillCPUInfoFomSysctl(c)
+ }
+ // There are no hw.optional sysctl values for the below features on Mac OS 11.0
+ // to detect their supported state dynamically. Assume the CPU features that
+ // Apple Silicon M1 supports to be available as a minimal set of features
+ // to all Go programs running on darwin/arm64.
+ // TODO: Add more if we know them.
+ c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
+
+ return true
+}
+
+func sysctlGetBool(name string) bool {
+ value, err := unix.SysctlUint32(name)
+ if err != nil {
+ return false
+ }
+ return value != 0
+}
+
+func sysctlGetString(name string) string {
+ value, err := unix.Sysctl(name)
+ if err != nil {
+ return ""
+ }
+ return value
+}
+
+func sysctlGetInt(unknown int, names ...string) int {
+ for _, name := range names {
+ value, err := unix.SysctlUint32(name)
+ if err != nil {
+ continue
+ }
+ if value != 0 {
+ return int(value)
+ }
+ }
+ return unknown
+}
+
+func sysctlGetInt64(unknown int, names ...string) int {
+ for _, name := range names {
+ value64, err := unix.SysctlUint64(name)
+ if err != nil {
+ continue
+ }
+ if int(value64) != unknown {
+ return int(value64)
+ }
+ }
+ return unknown
+}
+
+func setFeature(c *CPUInfo, name string, feature FeatureID) {
+ c.featureSet.setIf(sysctlGetBool(name), feature)
+}
+func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
+ c.BrandName = sysctlGetString("machdep.cpu.brand_string")
+
+ if len(c.BrandName) != 0 {
+ c.VendorString = strings.Fields(c.BrandName)[0]
+ }
+
+ c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu")
+ c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") /
+ sysctlGetInt(1, "hw.physicalcpu")
+ c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count")
+ c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily")
+ c.Model = sysctlGetInt(0, "machdep.cpu.model")
+ c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
+ c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
+ c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize")
+ c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
+ c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
+
+ // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile
+ setFeature(c, "hw.optional.arm.FEAT_AES", AESARM)
+ setFeature(c, "hw.optional.AdvSIMD", ASIMD)
+ setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP)
+ setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM)
+ setFeature(c, "hw.optional.FEAT_CRC32", CRC32)
+ setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
+ // setFeature(c, "", EVTSTRM)
+ setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
+ setFeature(c, "hw.optional.arm.FEAT_FP", FP)
+ setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
+ setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
+ setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
+ setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
+ setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
+ setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1)
+ setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
+ setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
+ setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
+ // setFeature(c, "", SM3)
+ // setFeature(c, "", SM4)
+ setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)
+
+ // from empirical observation
+ setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP)
+ setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS)
+ setFeature(c, "hw.optional.floatingpoint", FP)
+ setFeature(c, "hw.optional.armv8_2_sha3", SHA3)
+ setFeature(c, "hw.optional.armv8_2_sha512", SHA512)
+ setFeature(c, "hw.optional.armv8_3_compnum", FCMA)
+ setFeature(c, "hw.optional.armv8_crc32", CRC32)
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
new file mode 100644
index 000000000..ee278b9e4
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file located
+// here https://github.com/golang/sys/blob/master/LICENSE
+
+package cpuid
+
+import (
+ "encoding/binary"
+ "io/ioutil"
+ "runtime"
+)
+
+// HWCAP bits.
+const (
+ hwcap_FP = 1 << 0
+ hwcap_ASIMD = 1 << 1
+ hwcap_EVTSTRM = 1 << 2
+ hwcap_AES = 1 << 3
+ hwcap_PMULL = 1 << 4
+ hwcap_SHA1 = 1 << 5
+ hwcap_SHA2 = 1 << 6
+ hwcap_CRC32 = 1 << 7
+ hwcap_ATOMICS = 1 << 8
+ hwcap_FPHP = 1 << 9
+ hwcap_ASIMDHP = 1 << 10
+ hwcap_CPUID = 1 << 11
+ hwcap_ASIMDRDM = 1 << 12
+ hwcap_JSCVT = 1 << 13
+ hwcap_FCMA = 1 << 14
+ hwcap_LRCPC = 1 << 15
+ hwcap_DCPOP = 1 << 16
+ hwcap_SHA3 = 1 << 17
+ hwcap_SM3 = 1 << 18
+ hwcap_SM4 = 1 << 19
+ hwcap_ASIMDDP = 1 << 20
+ hwcap_SHA512 = 1 << 21
+ hwcap_SVE = 1 << 22
+ hwcap_ASIMDFHM = 1 << 23
+)
+
+func detectOS(c *CPUInfo) bool {
+ // For now assuming no hyperthreading is reasonable.
+ c.LogicalCores = runtime.NumCPU()
+ c.PhysicalCores = c.LogicalCores
+ c.ThreadsPerCore = 1
+ if hwcap == 0 {
+ // We did not get values from the runtime.
+ // Try reading /proc/self/auxv
+
+ // From https://github.com/golang/sys
+ const (
+ _AT_HWCAP = 16
+ _AT_HWCAP2 = 26
+
+ uintSize = int(32 << (^uint(0) >> 63))
+ )
+
+ buf, err := ioutil.ReadFile("/proc/self/auxv")
+ if err != nil {
+ // e.g. on android /proc/self/auxv is not accessible, so silently
+ // ignore the error and leave Initialized = false. On some
+ // architectures (e.g. arm64) doinit() implements a fallback
+ // readout and will set Initialized = true again.
+ return false
+ }
+ bo := binary.LittleEndian
+ for len(buf) >= 2*(uintSize/8) {
+ var tag, val uint
+ switch uintSize {
+ case 32:
+ tag = uint(bo.Uint32(buf[0:]))
+ val = uint(bo.Uint32(buf[4:]))
+ buf = buf[8:]
+ case 64:
+ tag = uint(bo.Uint64(buf[0:]))
+ val = uint(bo.Uint64(buf[8:]))
+ buf = buf[16:]
+ }
+ switch tag {
+ case _AT_HWCAP:
+ hwcap = val
+ case _AT_HWCAP2:
+ // Not used
+ }
+ }
+ if hwcap == 0 {
+ return false
+ }
+ }
+
+ // HWCap was populated by the runtime from the auxiliary vector.
+ // Use HWCap information since reading aarch64 system registers
+ // is not supported in user space on older linux kernels.
+ c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID)
+ c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32)
+ c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
+ c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
+ c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
+ c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4)
+ c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE)
+
+ // The Samsung S9+ kernel reports support for atomics, but not all cores
+ // actually support them, resulting in SIGILL. See issue #28431.
+ // TODO(elias.naur): Only disable the optimization on bad chipsets on android.
+ c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS)
+
+ return true
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
new file mode 100644
index 000000000..8733ba343
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build arm64 && !linux && !darwin
+// +build arm64,!linux,!darwin
+
+package cpuid
+
+import "runtime"
+
+func detectOS(c *CPUInfo) bool {
+ c.PhysicalCores = runtime.NumCPU()
+ // For now assuming 1 thread per core...
+ c.ThreadsPerCore = 1
+ c.LogicalCores = c.PhysicalCores
+ return false
+}
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
new file mode 100644
index 000000000..f8f201b5f
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build nounsafe
+// +build nounsafe
+
+package cpuid
+
+var hwcap uint
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
new file mode 100644
index 000000000..92af622eb
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
+
+//go:build !nounsafe
+// +build !nounsafe
+
+package cpuid
+
+import _ "unsafe" // needed for go:linkname
+
+//go:linkname hwcap internal/cpu.HWCap
+var hwcap uint
diff --git a/test/integration/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/test/integration/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
new file mode 100644
index 000000000..471d986d2
--- /dev/null
+++ b/test/integration/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+go tool dist list | while IFS=/ read os arch; do
+ echo "Checking $os/$arch..."
+ echo " normal"
+ GOARCH=$arch GOOS=$os go build -o /dev/null .
+ echo " noasm"
+ GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null .
+ echo " appengine"
+ GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null .
+ echo " noasm,appengine"
+ GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null .
+done
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/.gitignore b/test/integration/vendor/github.com/lufia/plan9stats/.gitignore
new file mode 100644
index 000000000..f1c181ec9
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/LICENSE b/test/integration/vendor/github.com/lufia/plan9stats/LICENSE
new file mode 100644
index 000000000..a6d47e807
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, KADOTA, Kyohei
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/README.md b/test/integration/vendor/github.com/lufia/plan9stats/README.md
new file mode 100644
index 000000000..04bdcef73
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/README.md
@@ -0,0 +1,13 @@
+# plan9stats
+A module for retrieving statistics of Plan 9
+
+[![GoDev][godev-image]][godev-url]
+[![Actions Status][actions-image]][actions-url]
+[![Coverage Status][coveralls-image]][coveralls-url]
+
+[godev-image]: https://pkg.go.dev/badge/github.com/lufia/plan9stats
+[godev-url]: https://pkg.go.dev/github.com/lufia/plan9stats
+[actions-image]: https://github.com/lufia/plan9stats/workflows/Test/badge.svg?branch=main
+[actions-url]: https://github.com/lufia/plan9stats/actions?workflow=Test
+[coveralls-image]: https://coveralls.io/repos/github/lufia/plan9stats/badge.svg
+[coveralls-url]: https://coveralls.io/github/lufia/plan9stats
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/cpu.go b/test/integration/vendor/github.com/lufia/plan9stats/cpu.go
new file mode 100644
index 000000000..a101b9119
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/cpu.go
@@ -0,0 +1,288 @@
+package stats
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// CPUType represents /dev/cputype.
+type CPUType struct {
+ Name string
+ Clock int // clock rate in MHz
+}
+
+func ReadCPUType(ctx context.Context, opts ...Option) (*CPUType, error) {
+ cfg := newConfig(opts...)
+ var c CPUType
+ if err := readCPUType(cfg.rootdir, &c); err != nil {
+ return nil, err
+ }
+ return &c, nil
+}
+
+type SysStats struct {
+ ID int
+ NumCtxSwitch int64
+ NumInterrupt int64
+ NumSyscall int64
+ NumFault int64
+ NumTLBFault int64
+ NumTLBPurge int64
+ LoadAvg int64 // in units of milli-CPUs and is decayed over time
+ Idle int // percentage
+ Interrupt int // percentage
+}
+
+// ReadSysStats reads system statistics from /dev/sysstat.
+func ReadSysStats(ctx context.Context, opts ...Option) ([]*SysStats, error) {
+ cfg := newConfig(opts...)
+ file := filepath.Join(cfg.rootdir, "/dev/sysstat")
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ var stats []*SysStats
+ for scanner.Scan() {
+ a := strings.Fields(scanner.Text())
+ if len(a) != 10 {
+ continue
+ }
+ var (
+ p intParser
+ stat SysStats
+ )
+ stat.ID = p.ParseInt(a[0], 10)
+ stat.NumCtxSwitch = p.ParseInt64(a[1], 10)
+ stat.NumInterrupt = p.ParseInt64(a[2], 10)
+ stat.NumSyscall = p.ParseInt64(a[3], 10)
+ stat.NumFault = p.ParseInt64(a[4], 10)
+ stat.NumTLBFault = p.ParseInt64(a[5], 10)
+ stat.NumTLBPurge = p.ParseInt64(a[6], 10)
+ stat.LoadAvg = p.ParseInt64(a[7], 10)
+ stat.Idle = p.ParseInt(a[8], 10)
+ stat.Interrupt = p.ParseInt(a[9], 10)
+ if err := p.Err(); err != nil {
+ return nil, err
+ }
+ stats = append(stats, &stat)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return stats, nil
+}
+
+func readCPUType(rootdir string, c *CPUType) error {
+ file := filepath.Join(rootdir, "/dev/cputype")
+ b, err := ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ b = bytes.TrimSpace(b)
+ i := bytes.LastIndexByte(b, ' ')
+ if i < 0 {
+ return fmt.Errorf("%s: invalid format", file)
+ }
+ clock, err := strconv.Atoi(string(b[i+1:]))
+ if err != nil {
+ return err
+ }
+ c.Name = string(b[:i])
+ c.Clock = clock
+ return nil
+}
+
+// Time represents /dev/time.
+type Time struct {
+ Unix time.Duration
+ UnixNano time.Duration
+ Ticks int64 // clock ticks
+ Freq int64 //cloc frequency
+}
+
+// Uptime returns uptime.
+func (t *Time) Uptime() time.Duration {
+ v := float64(t.Ticks) / float64(t.Freq)
+ return time.Duration(v*1000_000_000) * time.Nanosecond
+}
+
+func ReadTime(ctx context.Context, opts ...Option) (*Time, error) {
+ cfg := newConfig(opts...)
+ file := filepath.Join(cfg.rootdir, "/dev/time")
+ var t Time
+ if err := readTime(file, &t); err != nil {
+ return nil, err
+ }
+ return &t, nil
+}
+
+// ProcStatus represents a /proc/n/status.
+type ProcStatus struct {
+ Name string
+ User string
+ State string
+ Times CPUTime
+ MemUsed int64 // in units of 1024 bytes
+ BasePriority uint32 // 0(low) to 19(high)
+ Priority uint32 // 0(low) to 19(high)
+}
+
+// CPUTime represents /dev/cputime or a part of /proc/n/status.
+type CPUTime struct {
+ User time.Duration // the time in user mode (millisecconds)
+ Sys time.Duration
+ Real time.Duration
+ ChildUser time.Duration // exited children and descendants time in user mode
+ ChildSys time.Duration
+ ChildReal time.Duration
+}
+
+// CPUStats emulates Linux's /proc/stat.
+type CPUStats struct {
+ User time.Duration
+ Sys time.Duration
+ Idle time.Duration
+}
+
+func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) {
+ cfg := newConfig(opts...)
+ a, err := ReadSysStats(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ dir := filepath.Join(cfg.rootdir, "/proc")
+ d, err := os.Open(dir)
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ var up uint32parser
+ pids := make([]uint32, len(names))
+ for i, s := range names {
+ pids[i] = up.Parse(s)
+ }
+ if up.err != nil {
+ return nil, err
+ }
+ sort.Slice(pids, func(i, j int) bool {
+ return pids[i] < pids[j]
+ })
+
+ var stat CPUStats
+ for _, pid := range pids {
+ s := strconv.FormatUint(uint64(pid), 10)
+ file := filepath.Join(dir, s, "status")
+ var p ProcStatus
+ if err := readProcStatus(file, &p); err != nil {
+ return nil, err
+ }
+ stat.User += p.Times.User
+ stat.Sys += p.Times.Sys
+ }
+
+ var t Time
+ file := filepath.Join(cfg.rootdir, "/dev/time")
+ if err := readTime(file, &t); err != nil {
+ return nil, err
+ }
+ // In multi-processor host, Idle should multiple by number of cores.
+ u := t.Uptime() * time.Duration(len(a))
+ stat.Idle = u - stat.User - stat.Sys
+ return &stat, nil
+}
+
+func readProcStatus(file string, p *ProcStatus) error {
+ b, err := ioutil.ReadFile(file)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ fields := strings.Fields(string(b))
+ if len(fields) != 12 {
+ return errors.New("invalid format")
+ }
+ p.Name = string(fields[0])
+ p.User = string(fields[1])
+ p.State = string(fields[2])
+ var up uint32parser
+ p.Times.User = time.Duration(up.Parse(fields[3])) * time.Millisecond
+ p.Times.Sys = time.Duration(up.Parse(fields[4])) * time.Millisecond
+ p.Times.Real = time.Duration(up.Parse(fields[5])) * time.Millisecond
+ p.Times.ChildUser = time.Duration(up.Parse(fields[6])) * time.Millisecond
+ p.Times.ChildSys = time.Duration(up.Parse(fields[7])) * time.Millisecond
+ p.Times.ChildReal = time.Duration(up.Parse(fields[8])) * time.Millisecond
+ p.MemUsed, err = strconv.ParseInt(fields[9], 10, 64)
+ if err != nil {
+ return err
+ }
+ p.BasePriority = up.Parse(fields[10])
+ p.Priority = up.Parse(fields[11])
+ return up.err
+}
+
+func readTime(file string, t *Time) error {
+ b, err := ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ fields := strings.Fields(string(b))
+ if len(fields) != 4 {
+ return errors.New("invalid format")
+ }
+ n, err := strconv.ParseInt(fields[0], 10, 32)
+ if err != nil {
+ return err
+ }
+ t.Unix = time.Duration(n) * time.Second
+ v, err := strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ t.UnixNano = time.Duration(v) * time.Nanosecond
+ t.Ticks, err = strconv.ParseInt(fields[2], 10, 64)
+ if err != nil {
+ return err
+ }
+ t.Freq, err = strconv.ParseInt(fields[3], 10, 64)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+type uint32parser struct {
+ err error
+}
+
+func (p *uint32parser) Parse(s string) uint32 {
+ if p.err != nil {
+ return 0
+ }
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ p.err = err
+ return 0
+ }
+ return uint32(n)
+}
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/disk.go b/test/integration/vendor/github.com/lufia/plan9stats/disk.go
new file mode 100644
index 000000000..4a4fa0cd9
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/disk.go
@@ -0,0 +1,116 @@
+package stats
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
// Storage represents /dev/sdXX/ctl.
type Storage struct {
	Name       string       // base name of the device directory, e.g. "sdC0"
	Model      string       // text of the ctl file's "inquiry" line
	Capacity   int64        // product of the two "geometry" fields (sector count * sector size)
	Partitions []*Partition // one entry per "part" line in the ctl file
}

// Partition represents a part of /dev/sdXX/ctl.
type Partition struct {
	Name  string // partition name (second field of the "part" line)
	Start uint64 // third field of the "part" line (start sector — TODO confirm against sd(3))
	End   uint64 // fourth field of the "part" line (end sector — TODO confirm against sd(3))
}
+
// ReadStorages reads /dev/sdctl and returns one Storage for each
// matching sdXX device directory found under /dev.
// ctx is currently unused.
func ReadStorages(ctx context.Context, opts ...Option) ([]*Storage, error) {
	cfg := newConfig(opts...)
	sdctl := filepath.Join(cfg.rootdir, "/dev/sdctl")
	f, err := os.Open(sdctl)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var a []*Storage
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := bytes.Split(scanner.Bytes(), delim)
		if len(fields) == 0 {
			continue
		}
		// The first field is a device name such as "sdC0"; appending
		// "*" turns it into a glob pattern. The prefix test is not
		// affected by the trailing "*", so only "sd*" devices pass.
		exp := string(fields[0]) + "*"
		if !strings.HasPrefix(exp, "sd") {
			continue
		}
		dir := filepath.Join(cfg.rootdir, "/dev", exp)
		m, err := filepath.Glob(dir)
		if err != nil {
			return nil, err
		}
		for _, dir := range m {
			s, err := readStorage(dir)
			if err != nil {
				return nil, err
			}
			a = append(a, s)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return a, nil
}
+
// readStorage parses dir/ctl (dir being a /dev/sdXX device directory)
// into a Storage. Only "inquiry", "geometry" and "part" lines are
// interpreted; unknown or too-short lines are skipped.
func readStorage(dir string) (*Storage, error) {
	ctl := filepath.Join(dir, "ctl")
	f, err := os.Open(ctl)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var s Storage
	s.Name = filepath.Base(dir)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Bytes()
		switch {
		case bytes.HasPrefix(line, []byte("inquiry ")):
			// line[7:] keeps the space after "inquiry"; TrimSpace
			// removes it along with any trailing whitespace.
			s.Model = string(bytes.TrimSpace(line[7:]))
		case bytes.HasPrefix(line, []byte("geometry ")):
			fields := bytes.Split(line, delim)
			if len(fields) < 3 {
				continue
			}
			var p intParser
			sec := p.ParseInt64(string(fields[1]), 10)
			size := p.ParseInt64(string(fields[2]), 10)
			if err := p.Err(); err != nil {
				return nil, err
			}
			// Capacity is the product of the two geometry fields.
			s.Capacity = sec * size
		case bytes.HasPrefix(line, []byte("part ")):
			fields := bytes.Split(line, delim)
			if len(fields) < 4 {
				continue
			}
			var p intParser
			start := p.ParseUint64(string(fields[2]), 10)
			end := p.ParseUint64(string(fields[3]), 10)
			if err := p.Err(); err != nil {
				return nil, err
			}
			s.Partitions = append(s.Partitions, &Partition{
				Name:  string(fields[1]),
				Start: start,
				End:   end,
			})
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return &s, nil
}
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/doc.go b/test/integration/vendor/github.com/lufia/plan9stats/doc.go
new file mode 100644
index 000000000..10e398e7a
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/doc.go
@@ -0,0 +1,2 @@
+// Package stats provides statistic utilities for Plan 9.
+package stats
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/host.go b/test/integration/vendor/github.com/lufia/plan9stats/host.go
new file mode 100644
index 000000000..a3921c0e3
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/host.go
@@ -0,0 +1,223 @@
+package stats
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
var (
	// delim separates fields in the space-delimited Plan 9 status
	// files parsed throughout this package.
	delim = []byte{' '}
)
+
// Host represents host status.
type Host struct {
	Sysname    string       // contents of /dev/sysname, whitespace-trimmed
	Storages   []*Storage   // result of ReadStorages
	Interfaces []*Interface // interfaces found under each netdirs entry
}
+
// MemStats represents the memory statistics.
type MemStats struct {
	Total       int64 // total memory in byte
	PageSize    int64 // a page size in byte
	KernelPages int64 // pages used by the kernel (the "kernel" line of /dev/swap)
	UserPages   Gauge // used/avail user pages (the "user" line)
	SwapPages   Gauge // used/avail swap pages (the "swap" line)

	Malloced Gauge // kernel malloced data in byte
	Graphics Gauge // kernel graphics data in byte
}
+
// Gauge is used/available gauge.
type Gauge struct {
	Used  int64 // amount currently in use
	Avail int64 // total amount available
}

// Free reports how much of the gauge remains unused (Avail - Used).
func (g Gauge) Free() int64 {
	return g.Avail - g.Used
}
+
// ReadMemStats reads memory statistics from /dev/swap.
// ctx is currently unused.
func ReadMemStats(ctx context.Context, opts ...Option) (*MemStats, error) {
	cfg := newConfig(opts...)
	swap := filepath.Join(cfg.rootdir, "/dev/swap")
	f, err := os.Open(swap)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var stat MemStats
	// Each /dev/swap line is "<value> <key>". This table maps a key to
	// its destination field: plain numbers go to *int64 targets,
	// "used/avail" ratios to *Gauge targets.
	m := map[string]interface{}{
		"memory":        &stat.Total,
		"pagesize":      &stat.PageSize,
		"kernel":        &stat.KernelPages,
		"user":          &stat.UserPages,
		"swap":          &stat.SwapPages,
		"kernel malloc": &stat.Malloced,
		"kernel draw":   &stat.Graphics,
	}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// SplitN keeps everything after the first space as the key, so
		// multi-word keys such as "kernel malloc" survive intact.
		fields := bytes.SplitN(scanner.Bytes(), delim, 2)
		if len(fields) < 2 {
			continue
		}
		switch key := string(fields[1]); key {
		case "memory", "pagesize", "kernel":
			v := m[key].(*int64)
			n, err := strconv.ParseInt(string(fields[0]), 10, 64)
			if err != nil {
				return nil, err
			}
			*v = n
		case "user", "swap", "kernel malloc", "kernel draw":
			v := m[key].(*Gauge)
			if err := parseGauge(string(fields[0]), v); err != nil {
				return nil, err
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return &stat, nil
}
+
+func parseGauge(s string, r *Gauge) error {
+ a := strings.SplitN(s, "/", 2)
+ if len(a) != 2 {
+ return fmt.Errorf("can't parse ratio: %s", s)
+ }
+ var p intParser
+ u := p.ParseInt64(a[0], 10)
+ n := p.ParseInt64(a[1], 10)
+ if err := p.Err(); err != nil {
+ return err
+ }
+ r.Used = u
+ r.Avail = n
+ return nil
+}
+
// Interface is a network interface: an etherN device name and the raw
// contents of its addr file.
type Interface struct {
	Name string // device name, e.g. "ether0"
	Addr string // raw contents of the device's addr file (untrimmed)
}

const (
	numEther = 8  // see ether(3)
	numIpifc = 16 // see ip(3)
)
+
+// ReadInterfaces reads network interfaces from etherN.
+func ReadInterfaces(ctx context.Context, opts ...Option) ([]*Interface, error) {
+ cfg := newConfig(opts...)
+ var a []*Interface
+ for i := 0; i < numEther; i++ {
+ p, err := readInterface(cfg.rootdir, i)
+ if os.IsNotExist(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ a = append(a, p)
+ }
+ return a, nil
+}
+
+func readInterface(netroot string, i int) (*Interface, error) {
+ ether := fmt.Sprintf("ether%d", i)
+ dir := filepath.Join(netroot, ether)
+ info, err := os.Stat(dir)
+ if err != nil {
+ return nil, err
+ }
+ if !info.IsDir() {
+ return nil, fmt.Errorf("%s: is not directory", dir)
+ }
+
+ addr, err := ioutil.ReadFile(filepath.Join(dir, "addr"))
+ if err != nil {
+ return nil, err
+ }
+ return &Interface{
+ Name: ether,
+ Addr: string(addr),
+ }, nil
+}
+
var (
	// netdirs lists the network roots scanned by ReadHost for
	// interfaces.
	netdirs = []string{"/net", "/net.alt"}
)
+
// ReadHost reads host status: the system name, all storages, and the
// interfaces found under each directory listed in netdirs.
func ReadHost(ctx context.Context, opts ...Option) (*Host, error) {
	cfg := newConfig(opts...)
	var h Host
	name, err := readSysname(cfg.rootdir)
	if err != nil {
		return nil, err
	}
	h.Sysname = name

	a, err := ReadStorages(ctx, opts...)
	if err != nil {
		return nil, err
	}
	h.Storages = a

	for _, s := range netdirs {
		// netroot already includes cfg.rootdir, so re-rooting the
		// interface scan at netroot is deliberate here.
		netroot := filepath.Join(cfg.rootdir, s)
		ifaces, err := ReadInterfaces(ctx, WithRootDir(netroot))
		if err != nil {
			return nil, err
		}
		h.Interfaces = append(h.Interfaces, ifaces...)
	}
	return &h, nil
}
+
// readSysname returns the host name stored in rootdir/dev/sysname,
// with surrounding whitespace removed.
func readSysname(rootdir string) (string, error) {
	p := filepath.Join(rootdir, "/dev/sysname")
	b, err := ioutil.ReadFile(p)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b)), nil
}
+
// IPStats holds per-interface IP statistics.
type IPStats struct {
	ID      int    // number of interface in ipifc dir
	Device  string // associated physical device
	MTU     int    // max transfer unit
	Sendra6 uint8  // on == send router adv
	Recvra6 uint8  // on == recv router adv

	Pktin  int64 // packets read
	Pktout int64 // packets written
	Errin  int64 // read errors
	Errout int64 // write errors
}

// Iplifc describes a logical IP interface address.
// NOTE(review): presumably mirrors the ipifc data described in ip(2);
// verify against the reader that populates it.
type Iplifc struct {
	IP            net.IP
	Mask          net.IPMask
	Net           net.IP // ip & mask
	PerfLifetime  int64  // preferred lifetime
	ValidLifetime int64  // valid lifetime
}

// Ipv6rp is a placeholder for IPv6 router parameters.
type Ipv6rp struct {
	// TODO(lufia): see ip(2)
}
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/int.go b/test/integration/vendor/github.com/lufia/plan9stats/int.go
new file mode 100644
index 000000000..e3c9dc834
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/int.go
@@ -0,0 +1,40 @@
+package stats
+
+import (
+ "strconv"
+)
+
// intParser parses a sequence of strings as integers while remembering
// the first error encountered. After a failure every subsequent Parse*
// call returns 0 without parsing, so callers can parse several fields
// and inspect Err once at the end.
type intParser struct {
	err error // first parse error, or nil
}

// ParseInt parses s in the given base as an int.
func (p *intParser) ParseInt(s string, base int) int {
	if p.err != nil {
		return 0
	}
	v, err := strconv.ParseInt(s, base, 0)
	p.err = err
	return int(v)
}

// ParseInt64 parses s in the given base as an int64.
func (p *intParser) ParseInt64(s string, base int) int64 {
	if p.err != nil {
		return 0
	}
	v, err := strconv.ParseInt(s, base, 64)
	p.err = err
	return v
}

// ParseUint64 parses s in the given base as a uint64.
func (p *intParser) ParseUint64(s string, base int) uint64 {
	if p.err != nil {
		return 0
	}
	v, err := strconv.ParseUint(s, base, 64)
	p.err = err
	return v
}

// Err reports the first error recorded by any Parse* call.
func (p *intParser) Err() error {
	return p.err
}
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/opts.go b/test/integration/vendor/github.com/lufia/plan9stats/opts.go
new file mode 100644
index 000000000..05b7d036a
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/opts.go
@@ -0,0 +1,21 @@
+package stats
+
// Config holds the settings shared by this package's Read* functions.
type Config struct {
	rootdir string // prefix prepended to every absolute path this package reads
}

// Option mutates a Config.
type Option func(*Config)

// newConfig builds a Config by applying each option, in order, to the
// zero value.
func newConfig(opts ...Option) *Config {
	cfg := new(Config)
	for _, apply := range opts {
		apply(cfg)
	}
	return cfg
}

// WithRootDir returns an Option that makes the package read its files
// relative to dir instead of the real root.
func WithRootDir(dir string) Option {
	return func(cfg *Config) {
		cfg.rootdir = dir
	}
}
diff --git a/test/integration/vendor/github.com/lufia/plan9stats/stats.go b/test/integration/vendor/github.com/lufia/plan9stats/stats.go
new file mode 100644
index 000000000..d4ecdcfa0
--- /dev/null
+++ b/test/integration/vendor/github.com/lufia/plan9stats/stats.go
@@ -0,0 +1,88 @@
+package stats
+
+import (
+ "bufio"
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
// InterfaceStats represents the parsed contents of an ethernet
// device's stats file.
type InterfaceStats struct {
	PacketsReceived  int64 // in packets
	Link             int   // link status
	PacketsSent      int64 // out packets
	NumCRCErr        int   // input CRC errors
	NumOverflows     int   // packet overflows
	NumSoftOverflows int   // software overflow
	NumFramingErr    int   // framing errors
	NumBufferingErr  int   // buffering errors
	NumOutputErr     int   // output errors
	Promiscuous      int   // number of promiscuous opens
	Mbps             int   // megabits per sec
	Addr             string // value of the "addr" line, verbatim
}
+
// ReadInterfaceStats parses the "stats" file under the configured root
// directory (an ether device directory) into an InterfaceStats.
// Lines have the form "key: value"; unknown keys are ignored, and a
// malformed numeric value aborts with the parse error.
// ctx is currently unused.
func ReadInterfaceStats(ctx context.Context, opts ...Option) (*InterfaceStats, error) {
	cfg := newConfig(opts...)
	file := filepath.Join(cfg.rootdir, "stats")
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var stats InterfaceStats
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		s := strings.TrimSpace(scanner.Text())
		a := strings.SplitN(s, ":", 2)
		if len(a) != 2 {
			continue
		}
		// p records the first numeric parse failure for this line.
		var p intParser
		v := strings.TrimSpace(a[1])
		switch a[0] {
		case "in":
			stats.PacketsReceived = p.ParseInt64(v, 10)
		case "link":
			stats.Link = p.ParseInt(v, 10)
		case "out":
			stats.PacketsSent = p.ParseInt64(v, 10)
		case "crc":
			stats.NumCRCErr = p.ParseInt(v, 10)
		case "overflows":
			stats.NumOverflows = p.ParseInt(v, 10)
		case "soft overflows":
			stats.NumSoftOverflows = p.ParseInt(v, 10)
		case "framing errs":
			stats.NumFramingErr = p.ParseInt(v, 10)
		case "buffer errs":
			stats.NumBufferingErr = p.ParseInt(v, 10)
		case "output errs":
			stats.NumOutputErr = p.ParseInt(v, 10)
		case "prom":
			stats.Promiscuous = p.ParseInt(v, 10)
		case "mbps":
			stats.Mbps = p.ParseInt(v, 10)
		case "addr":
			stats.Addr = v
		}
		if err := p.Err(); err != nil {
			return nil, err
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return &stats, nil
}
+
// TCPStats holds TCP protocol statistics.
// NOTE(review): nothing in this file populates TCPStats; confirm the
// corresponding reader lives elsewhere in the package.
type TCPStats struct {
	MaxConn            int
	MaxSegment         int
	ActiveOpens        int
	PassiveOpens       int
	EstablishedResets  int
	CurrentEstablished int
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/Makefile b/test/integration/vendor/github.com/nginx/agent/sdk/v2/Makefile
new file mode 100644
index 000000000..8df31cf4c
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/Makefile
@@ -0,0 +1,56 @@
+help: ## Show help message
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\033[36m\033[0m\n"} /^[$$()% 0-9a-zA-Z_-]+:.*?##/ { printf " \033[36m%-17s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+clean: ## Remove all .pb.go files
+ find . -name "*.pb.go" -not -path "*/vendor/*" -exec rm -f {} \;
+
+lint: ## Run linter
+ GOWORK=off golangci-lint run -c ../scripts/.golangci.yml
+
+generate: clean ## Generate .pb.go files
+ for packageName in "common" "events"; do \
+ protoc \
+ -I ./proto/$$packageName \
+ -I ./proto \
+ -I /usr/local/include \
+ -I ./vendor/github.com/gogo/protobuf/gogoproto \
+ -I ./vendor/github.com/gogo/protobuf/proto \
+ --gogofast_out=plugins=grpc,paths=source_relative,\
+Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types,\
+Mgoogle/api/annotations.proto=github.com/gogo/googleapis/google/api,\
+Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types:\
+./proto/$$packageName/ \
+ --doc_out=./../docs/proto/ \
+ --doc_opt=markdown,$$packageName.md \
+ proto/$$packageName/*.proto;\
+ grep -v '^swagger:' ./../docs/proto/$$packageName.md > ./../docs/proto/tmp-$$packageName.md && mv ./../docs/proto/tmp-$$packageName.md ./../docs/proto/$$packageName.md;\
+ done
+
+ protoc \
+ -I ./proto/events \
+ -I ./proto \
+ -I /usr/local/include \
+ -I ./vendor/github.com/gogo/protobuf/gogoproto \
+ -I ./vendor/github.com/gogo/protobuf/proto \
+ --gogofast_out=plugins=grpc,paths=source_relative,\
+Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types,\
+Mgoogle/api/annotations.proto=github.com/gogo/googleapis/google/api,\
+Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types:\
+./proto/ \
+ --doc_out=./../docs/proto/ \
+ --doc_opt=markdown,proto.md \
+ proto/*.proto; \
+ grep -v '^swagger:' ./../docs/proto/proto.md > ./../docs/proto/tmp-proto.md && mv ./../docs/proto/tmp-proto.md ./../docs/proto/proto.md
+
+ go generate ./sdk/...
+
+all-test: unit-test ## Run all tests
+
+unit-test: ## Run unit tests
+ go test -v -cover ./...
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/agent/config/config_helpers.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/agent/config/config_helpers.go
new file mode 100644
index 000000000..214b0ea85
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/agent/config/config_helpers.go
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
const (
	// KeyDelimiter joins the feature namespace with a feature name in
	// viper configuration keys.
	KeyDelimiter = "_"

	// viper keys used in config
	FeaturesKey             = "features"
	FeatureRegistration     = FeaturesKey + KeyDelimiter + "registration"
	FeatureNginxConfig      = FeaturesKey + KeyDelimiter + "nginx-config"
	FeatureNginxConfigAsync = FeaturesKey + KeyDelimiter + "nginx-config-async"
	FeatureNginxSSLConfig   = FeaturesKey + KeyDelimiter + "nginx-ssl-config"
	FeatureNginxCounting    = FeaturesKey + KeyDelimiter + "nginx-counting"
	FeatureMetrics          = FeaturesKey + KeyDelimiter + "metrics"
	FeatureMetricsThrottle  = FeaturesKey + KeyDelimiter + "metrics-throttle"
	FeatureDataPlaneStatus  = FeaturesKey + KeyDelimiter + "dataplane-status"
	FeatureProcessWatcher   = FeaturesKey + KeyDelimiter + "process-watcher"
	FeatureFileWatcher      = FeaturesKey + KeyDelimiter + "file-watcher"
	FeatureActivityEvents   = FeaturesKey + KeyDelimiter + "activity-events"
	FeatureAgentAPI         = FeaturesKey + KeyDelimiter + "agent-api"
)

// GetDefaultFeatures returns the feature keys that are enabled by
// default, in their canonical order. Each call returns a fresh slice.
func GetDefaultFeatures() []string {
	defaults := [...]string{
		FeatureRegistration,
		FeatureNginxConfig,
		FeatureNginxSSLConfig,
		FeatureNginxCounting,
		FeatureNginxConfigAsync,
		FeatureMetrics,
		FeatureMetricsThrottle,
		FeatureDataPlaneStatus,
		FeatureProcessWatcher,
		FeatureFileWatcher,
		FeatureActivityEvents,
		FeatureAgentAPI,
	}
	return defaults[:]
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/backoff.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/backoff.go
new file mode 100644
index 000000000..939d8a1c7
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/backoff.go
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package sdk
+
+import (
+ "context"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+const (
+ BACKOFF_JITTER = 0.10
+ BACKOFF_MULTIPLIER = backoff.DefaultMultiplier
+)
+
+func WaitUntil(
+ ctx context.Context,
+ initialInterval time.Duration,
+ maxInterval time.Duration,
+ maxElapsedTime time.Duration,
+ operation backoff.Operation,
+) error {
+ exponentialBackoff := backoff.NewExponentialBackOff()
+ exponentialBackoff.InitialInterval = initialInterval
+ exponentialBackoff.MaxInterval = maxInterval
+ exponentialBackoff.MaxElapsedTime = maxElapsedTime
+ exponentialBackoff.RandomizationFactor = BACKOFF_JITTER
+ exponentialBackoff.Multiplier = BACKOFF_MULTIPLIER
+
+ expoBackoffWithContext := backoff.WithContext(exponentialBackoff, ctx)
+
+ err := backoff.Retry(backoff.Operation(operation), expoBackoffWithContext)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/certificates.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/certificates.go
new file mode 100644
index 000000000..0d13a03c5
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/certificates.go
@@ -0,0 +1,52 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package sdk
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+)
+
// LoadCertificates loads a TLS certificate/key pair from disk, parses
// the leaf certificate, and returns the pair together with a cert pool
// containing only that leaf.
func LoadCertificates(certPath, keyPath string) (*tls.Certificate, *x509.CertPool, error) {
	keyPair, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		return nil, nil, err
	}

	leaf, err := x509.ParseCertificate(keyPair.Certificate[0])
	if err != nil {
		return nil, nil, err
	}
	keyPair.Leaf = leaf

	pool := x509.NewCertPool()
	pool.AddCert(leaf)

	return &keyPair, pool, nil
}
+
// LoadCertificate reads certPath and parses it as a single PEM-encoded
// X.509 certificate.
func LoadCertificate(certPath string) (*x509.Certificate, error) {
	raw, err := ioutil.ReadFile(certPath)
	if err != nil {
		return nil, err
	}

	block, _ := pem.Decode(raw)
	if block == nil {
		return nil, fmt.Errorf("could not decode: cert was not PEM format")
	}

	return x509.ParseCertificate(block.Bytes)
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/checksum/checksum.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/checksum/checksum.go
new file mode 100644
index 000000000..627afab04
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/checksum/checksum.go
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package checksum
+
+import (
+ "crypto/sha256"
+ "fmt"
+)
+
// Checksum - calculate checksum from []byte.
// The result is the raw 32-byte SHA-256 digest carried in a string
// (binary, not hex; see HexChecksum for a printable form).
func Checksum(b []byte) string {
	sum := sha256.Sum256(b)
	return string(sum[:])
}

// HexChecksum returns the SHA-256 digest of b as a lowercase hex
// string.
func HexChecksum(b []byte) string {
	return fmt.Sprintf("%x", Checksum(b))
}
+
// Chunk - split bytes to chunk limits.
// Each chunk (except possibly the last) is lim bytes long, and chunks
// alias buf's backing array rather than copying it.
func Chunk(buf []byte, lim int) [][]byte {
	chunks := make([][]byte, 0, len(buf)/lim+1)
	for off := 0; off < len(buf); off += lim {
		end := off + lim
		if end > len(buf) {
			end = len(buf)
		}
		chunks = append(chunks, buf[off:end])
	}
	return chunks
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/client.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/client.go
new file mode 100644
index 000000000..7922328bd
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/client.go
@@ -0,0 +1,93 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+//go:generate enumer -type=MsgClassification -text -yaml -json -transform=snake -trimprefix=MsgClassification
+
+package client
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/nginx/agent/sdk/v2/proto"
+)
+
// BackoffSettings controls the retry behaviour of a client connection.
type BackoffSettings struct {
	initialInterval time.Duration // delay before the first retry
	maxInterval     time.Duration // upper bound for a single retry delay
	maxTimeout      time.Duration // total retry budget for connect/receive (0 presumably means unlimited — confirm against sdk.WaitUntil)
	sendMaxTimeout  time.Duration // total retry budget for send/upload/download operations
}
+
// MsgClassification labels the broad category of a Message.
type MsgClassification int

const (
	MsgClassificationCommand MsgClassification = iota
	MsgClassificationMetric
	MsgClassificationEvent
)

var (
	// DefaultBackoffSettings is the retry configuration used by
	// clients unless overridden via WithBackoffSettings.
	DefaultBackoffSettings = BackoffSettings{
		initialInterval: 10 * time.Second,
		maxInterval:     60 * time.Second,
		maxTimeout:      0,
		sendMaxTimeout:  2 * time.Minute,
	}
)
+
type (
	// MsgType is implemented by protobuf enum types describing the
	// concrete type of a message.
	MsgType interface {
		String() string
		EnumDescriptor() ([]byte, []int)
	}
	// Message is the unit of communication exchanged with the control
	// plane.
	Message interface {
		Meta() *proto.Metadata
		Type() MsgType
		Classification() MsgClassification
		Data() interface{}
		Raw() interface{}
	}
	// Client is the common surface of the gRPC clients in this
	// package; the With* methods are fluent configuration setters that
	// return the client for chaining.
	Client interface {
		Connect(ctx context.Context) error
		Close() error

		Server() string
		WithServer(string) Client

		DialOptions() []grpc.DialOption
		WithDialOptions(options ...grpc.DialOption) Client

		WithInterceptor(interceptor interceptors.Interceptor) Client
		WithClientInterceptor(interceptor interceptors.ClientInterceptor) Client

		WithBackoffSettings(backoffSettings BackoffSettings) Client
	}
	// MetricReporter is a Client that can send metric messages.
	MetricReporter interface {
		Client
		Send(context.Context, Message) error
	}
	// Commander is a Client that exchanges commands and transfers
	// NginxConfig payloads in chunks.
	Commander interface {
		Client
		ChunksSize() int
		WithChunkSize(int) Client
		Send(context.Context, Message) error
		Download(context.Context, *proto.Metadata) (*proto.NginxConfig, error)
		Upload(context.Context, *proto.NginxConfig, string) error
		Recv() <-chan Message
	}
	// Controller aggregates several Clients behind a single
	// Connect/Close pair.
	Controller interface {
		WithClient(Client) Controller
		Context() context.Context
		WithContext(context.Context) Controller
		Connect() error
		Close() error
	}
)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/commander.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/commander.go
new file mode 100644
index 000000000..6f8be3f2c
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/commander.go
@@ -0,0 +1,350 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "sync"
+
+ log "github.com/sirupsen/logrus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+
+ "github.com/nginx/agent/sdk/v2"
+ "github.com/nginx/agent/sdk/v2/checksum"
+ sdkGRPC "github.com/nginx/agent/sdk/v2/grpc"
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/nginx/agent/sdk/v2/proto"
+)
+
const (
	// DefaultChunkSize is the payload size, in bytes, used when
	// splitting an NginxConfig for upload.
	DefaultChunkSize = 4 * 1024
)

// NewCommanderClient returns a Commander with the default chunk size
// and backoff settings; configure it via the With* methods, then call
// Connect.
func NewCommanderClient() Commander {
	return &commander{
		recvChan:        make(chan Message, 1),
		downloadChan:    make(chan *proto.DataChunk, 1),
		connector:       newConnector(),
		chunkSize:       DefaultChunkSize,
		backoffSettings: DefaultBackoffSettings,
	}
}
+
// commander implements the Commander interface over a grpc command
// channel.
type commander struct {
	*connector                                           // embedded server/dial-option/interceptor state
	chunkSize       int                                  // upload chunk size in bytes
	client          proto.CommanderClient                // created by createClient
	channel         proto.Commander_CommandChannelClient // created by createClient
	recvChan        chan Message                         // delivers received commands to Recv consumers
	downloadChan    chan *proto.DataChunk                // NOTE(review): not used by any method in this file — confirm before relying on it
	ctx             context.Context                      // stored by Connect; used by all subsequent operations
	mu              sync.Mutex                           // serializes client (re)creation in createClient
	backoffSettings BackoffSettings
}
+
// WithInterceptor appends interceptor to the connector's interceptor
// list and returns the client for chaining.
// NOTE(review): the stored interceptors are not referenced elsewhere
// in this file; presumably consumed during dialing — verify.
func (c *commander) WithInterceptor(interceptor interceptors.Interceptor) Client {
	c.connector.interceptors = append(c.connector.interceptors, interceptor)
	return c
}

// WithClientInterceptor appends a client interceptor and returns the
// client for chaining.
func (c *commander) WithClientInterceptor(interceptor interceptors.ClientInterceptor) Client {
	c.connector.clientInterceptors = append(c.connector.clientInterceptors, interceptor)
	return c
}

// WithGrpcConnection injects an existing grpc connection (used instead
// of dialing a fresh one) and returns the client for chaining.
func (c *commander) WithGrpcConnection(clientConnection *grpc.ClientConn) Client {
	c.connector.grpc = clientConnection
	return c
}
+
// Connect dials the configured server (retrying with backoff) and
// opens the command channel, then starts the background receive loop.
// NOTE(review): ctx is stored on the struct and reused by every later
// operation (Send/Download/Upload ignore their own ctx arguments).
func (c *commander) Connect(ctx context.Context) error {
	log.Debugf("Commander connecting to %s", c.server)

	c.ctx = ctx
	err := sdk.WaitUntil(
		c.ctx,
		c.backoffSettings.initialInterval,
		c.backoffSettings.maxInterval,
		c.backoffSettings.maxTimeout,
		c.createClient,
	)
	if err != nil {
		return err
	}

	go c.recvLoop()

	return nil
}
+
// Close shuts down the send side of the command channel and then the
// underlying grpc connection.
// NOTE(review): c.channel and c.grpc are nil until Connect succeeds;
// calling Close before that would panic — confirm callers never do.
func (c *commander) Close() error {
	err := c.channel.CloseSend()
	if err != nil {
		return err
	}
	return c.grpc.Close()
}
+
// Server returns the configured server address.
func (c *commander) Server() string {
	return c.server
}

// WithServer sets the server address to dial and returns the client
// for chaining.
func (c *commander) WithServer(s string) Client {
	c.server = s
	return c
}

// DialOptions returns the accumulated grpc dial options.
func (c *commander) DialOptions() []grpc.DialOption {
	return c.dialOptions
}

// WithDialOptions appends grpc dial options and returns the client for
// chaining.
func (c *commander) WithDialOptions(options ...grpc.DialOption) Client {
	c.dialOptions = append(c.dialOptions, options...)
	return c
}

// WithChunkSize sets the upload chunk size (bytes) and returns the
// client for chaining.
func (c *commander) WithChunkSize(i int) Client {
	c.chunkSize = i
	return c
}

// ChunksSize returns the configured upload chunk size in bytes.
func (c *commander) ChunksSize() int {
	return c.chunkSize
}

// WithBackoffSettings replaces the retry configuration and returns the
// client for chaining.
func (c *commander) WithBackoffSettings(backoffSettings BackoffSettings) Client {
	c.backoffSettings = backoffSettings
	return c
}
+
// Send transmits a command message over the command channel, retrying
// with backoff until it succeeds or sendMaxTimeout elapses. Only
// messages classified as MsgClassificationCommand whose Raw() is a
// *proto.Command are accepted.
// NOTE(review): retries run against c.ctx (set in Connect), not the
// ctx argument.
func (c *commander) Send(ctx context.Context, message Message) error {
	var (
		cmd *proto.Command
		ok  bool
	)

	switch message.Classification() {
	case MsgClassificationCommand:
		if cmd, ok = message.Raw().(*proto.Command); !ok {
			return fmt.Errorf("Expected a command message, but received %T", message.Data())
		}
	default:
		return fmt.Errorf("Expected a command message, but received %T", message.Data())
	}

	err := sdk.WaitUntil(c.ctx, c.backoffSettings.initialInterval, c.backoffSettings.maxInterval, c.backoffSettings.sendMaxTimeout, func() error {
		if err := c.channel.Send(cmd); err != nil {
			// handleGrpcError logs and forces a reconnect before the
			// next retry attempt.
			return c.handleGrpcError("Commander Channel Send", err)
		}

		log.Tracef("Commander sent command %v", cmd)

		return nil
	})

	return err
}
+
// Recv returns the channel on which commands received from the server
// are delivered (populated by recvLoop).
func (c *commander) Recv() <-chan Message {
	return c.recvChan
}
+
// Download streams the chunked NginxConfig identified by metadata from
// the server, verifies its checksum, and unmarshals it. The whole
// operation is retried with backoff up to sendMaxTimeout.
// NOTE(review): the ctx argument is ignored; c.ctx is used instead.
func (c *commander) Download(ctx context.Context, metadata *proto.Metadata) (*proto.NginxConfig, error) {
	log.Debugf("Downloading config (messageId=%s)", metadata.GetMessageId())
	cfg := &proto.NginxConfig{}

	err := sdk.WaitUntil(c.ctx, c.backoffSettings.initialInterval, c.backoffSettings.maxInterval, c.backoffSettings.sendMaxTimeout, func() error {
		var (
			header *proto.DataChunk_Header
			body   []byte
		)

		downloader, err := c.client.Download(c.ctx, &proto.DownloadRequest{Meta: metadata})
		if err != nil {
			return c.handleGrpcError("Commander Downloader", err)
		}

	LOOP:
		for {
			chunk, err := downloader.Recv()
			// io.EOF just ends the stream; the nil-chunk check below
			// breaks out of the loop.
			if err != nil && err != io.EOF {
				return c.handleGrpcError("Commander Downloader", err)
			}

			if chunk == nil {
				break LOOP
			}

			switch dataChunk := chunk.Chunk.(type) {
			case *proto.DataChunk_Header:
				// Exactly one header chunk is allowed per download.
				if header != nil {
					return ErrDownloadHeaderUnexpectedNumber
				}
				header = dataChunk
			case *proto.DataChunk_Data:
				body = append(body, dataChunk.Data.Data...)
			case nil:
				break LOOP
			}
		}

		if header == nil {
			return ErrDownloadHeaderUnexpectedNumber
		}

		// The header carries the expected checksum of the reassembled
		// payload.
		if checksum.Checksum(body) != header.Header.Checksum {
			return ErrDownloadChecksumMismatch
		}

		err = json.Unmarshal(body, cfg)
		if err != nil {
			log.Warnf("Download failed to unmarshal: %s", err)
			return ErrUnmarshallingData
		}

		return nil
	})

	return cfg, err
}
+
+func (c *commander) Upload(ctx context.Context, cfg *proto.NginxConfig, messageId string) error {
+ payload, err := json.Marshal(cfg)
+ if err != nil {
+ return err
+ }
+
+ metadata := sdkGRPC.NewMessageMeta(messageId)
+ payloadChecksum := checksum.Checksum(payload)
+ chunks := checksum.Chunk(payload, c.chunkSize)
+
+ return sdk.WaitUntil(c.ctx, c.backoffSettings.initialInterval, c.backoffSettings.maxInterval, c.backoffSettings.sendMaxTimeout, func() error {
+ sender, err := c.client.Upload(c.ctx)
+ if err != nil {
+ return c.handleGrpcError("Commander Upload", err)
+ }
+
+ err = sender.Send(&proto.DataChunk{
+ Chunk: &proto.DataChunk_Header{
+ Header: &proto.ChunkedResourceHeader{
+ Chunks: int32(len(chunks)),
+ Checksum: payloadChecksum,
+ Meta: metadata,
+ ChunkSize: int32(c.ChunksSize()),
+ },
+ },
+ })
+ if err != nil {
+ return c.handleGrpcError("Commander Upload Header", err)
+ }
+
+ for id, chunk := range chunks {
+ log.Infof("Upload: Sending data chunk data %d (messageId=%s)", int32(id), metadata.GetMessageId())
+ if err = sender.Send(&proto.DataChunk{
+ Chunk: &proto.DataChunk_Data{
+ Data: &proto.ChunkedResourceChunk{
+ ChunkId: int32(id),
+ Data: chunk,
+ Meta: metadata,
+ },
+ },
+ }); err != nil {
+ return c.handleGrpcError("Commander Upload"+strconv.Itoa(id), err)
+ }
+ }
+
+ log.Infof("Upload sending done %s (chunks=%d)", metadata.MessageId, len(chunks))
+ status, err := sender.CloseAndRecv()
+ if err != nil {
+ return c.handleGrpcError("Commander Upload CloseAndRecv", err)
+ }
+
+ if status.Status != proto.UploadStatus_OK {
+ return fmt.Errorf(status.Reason)
+ }
+
+ return nil
+ })
+}
+
+func (c *commander) createClient() error {
+ log.Debug("Creating commander client")
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Making sure that the previous client connection is closed before creating a new one
+ if c.grpc != nil {
+ err := c.grpc.Close()
+ if err != nil {
+ log.Warnf("Error closing old grpc connection: %v", err)
+ }
+ }
+
+ grpc, err := sdkGRPC.NewGrpcConnectionWithContext(c.ctx, c.server, c.DialOptions())
+ if err != nil {
+ log.Errorf("Unable to create client connection to %s: %s", c.server, err)
+ log.Infof("Commander retrying to connect to %s", c.grpc.Target())
+ return err
+ }
+ c.grpc = grpc
+
+ c.client = proto.NewCommanderClient(c.grpc)
+
+ channel, err := c.client.CommandChannel(c.ctx)
+ if err != nil {
+ log.Errorf("Unable to create command channel: %s", err)
+ log.Infof("Commander retrying to connect to %s", c.grpc.Target())
+ return err
+ }
+ c.channel = channel
+
+ return nil
+}
+
+func (c *commander) recvLoop() {
+ log.Debug("Commander receive loop starting")
+ for {
+ err := sdk.WaitUntil(c.ctx, c.backoffSettings.initialInterval, c.backoffSettings.maxInterval, c.backoffSettings.maxTimeout, func() error {
+ cmd, err := c.channel.Recv()
+ log.Infof("Commander received %v, %v", cmd, err)
+ if err != nil {
+ return c.handleGrpcError("Commander Channel Recv", err)
+ }
+
+ select {
+ case <-c.ctx.Done():
+ case c.recvChan <- MessageFromCommand(cmd):
+ }
+
+ return nil
+ })
+ if err != nil {
+ log.Errorf("Error retrying to receive messages from the commander channel: %v", err)
+ }
+ }
+}
+
+func (c *commander) handleGrpcError(messagePrefix string, err error) error {
+ if st, ok := status.FromError(err); ok {
+ log.Errorf("%s: error communicating with %s, code=%s, message=%v", messagePrefix, c.grpc.Target(), st.Code().String(), st.Message())
+ } else if err == io.EOF {
+ log.Errorf("%s: server %s is not processing requests, code=%s, message=%v", messagePrefix, c.grpc.Target(), st.Code().String(), st.Message())
+ } else {
+ log.Errorf("%s: unknown grpc error while communicating with %s, %v", messagePrefix, c.grpc.Target(), err)
+ }
+
+ log.Infof("%s: retrying to connect to %s", messagePrefix, c.grpc.Target())
+ _ = c.createClient()
+
+ return err
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/connect.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/connect.go
new file mode 100644
index 000000000..85114e12f
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/connect.go
@@ -0,0 +1,26 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "google.golang.org/grpc"
+
+ "github.com/nginx/agent/sdk/v2/interceptors"
+)
+
// connector holds the state shared by all SDK gRPC clients: the target
// server address, dial options, unary/stream interceptors, and the live
// client connection once one has been established.
type connector struct {
	server string //nolint:structcheck,unused
	dialOptions []grpc.DialOption //nolint:structcheck,unused
	interceptors []interceptors.Interceptor
	clientInterceptors []interceptors.ClientInterceptor
	grpc *grpc.ClientConn
}
+
+func newConnector() *connector {
+ return &connector{}
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/controller.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/controller.go
new file mode 100644
index 000000000..b89efd01b
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/controller.go
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "context"
+ "fmt"
+)
+
+func NewClientController() Controller {
+ return &ctrl{}
+}
+
// ctrl is the default Controller implementation: it fans Connect/Close
// calls out to every registered client, sharing a single context.
type ctrl struct {
	ctx context.Context
	clients []Client
}
+
+func (c *ctrl) WithClient(client Client) Controller {
+ c.clients = append(c.clients, client)
+
+ return c
+}
+
+func (c *ctrl) WithContext(ctx context.Context) Controller {
+ c.ctx = ctx
+
+ return c
+}
+
+func (c *ctrl) Connect() error {
+ var retErr error
+ for _, client := range c.clients {
+ if err := client.Connect(c.ctx); err != nil {
+ if retErr == nil {
+ retErr = fmt.Errorf("%s failed to connect: %w", client.Server(), err)
+ } else {
+ retErr = fmt.Errorf("%v\n%s failed to connect: %w", retErr, client.Server(), err)
+ }
+ }
+ }
+
+ return retErr
+}
+
+func (c *ctrl) Close() error {
+ var retErr error
+ for _, client := range c.clients {
+ if err := client.Close(); err != nil {
+ if retErr == nil {
+ retErr = fmt.Errorf("%s failed to close: %w", client.Server(), err)
+ } else {
+ retErr = fmt.Errorf("%v\n%s failed to close: %w", retErr, client.Server(), err)
+ }
+ }
+ }
+
+ return retErr
+}
+
// Context returns the context supplied via WithContext (nil if unset).
func (c *ctrl) Context() context.Context {
	return c.ctx
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/errors.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/errors.go
new file mode 100644
index 000000000..905d75b6a
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/errors.go
@@ -0,0 +1,22 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "errors"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
// Sentinel errors shared by the SDK clients. Each is wrapped in
// backoff.PermanentError so retry loops built on cenkalti/backoff stop
// immediately instead of retrying an unrecoverable condition.
var (
	ErrDownloadHeaderUnexpectedNumber = &backoff.PermanentError{Err: errors.New("unexpected number of headers")}
	ErrDownloadChecksumMismatch       = &backoff.PermanentError{Err: errors.New("download checksum mismatch")}
	ErrDownloadDataChunkNoData        = &backoff.PermanentError{Err: errors.New("download DataChunk without data")}
	ErrNotConnected                   = &backoff.PermanentError{Err: errors.New("not connected")}
	ErrUnmarshallingData              = &backoff.PermanentError{Err: errors.New("unable to unmarshal data")}
)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/message.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/message.go
new file mode 100644
index 000000000..ae9a6ff7b
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/message.go
@@ -0,0 +1,91 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "github.com/nginx/agent/sdk/v2/proto"
+ models "github.com/nginx/agent/sdk/v2/proto/events"
+)
+
+func MessageFromCommand(cmd *proto.Command) Message {
+ return &msg{
+ msgType: MsgClassificationCommand,
+ cmd: cmd,
+ }
+}
+
+func MessageFromMetrics(metric *proto.MetricsReport) Message {
+ return &msg{
+ msgType: MsgClassificationMetric,
+ metric: metric,
+ }
+}
+
+func MessageFromEvents(event *models.EventReport) Message {
+ return &msg{
+ msgType: MsgClassificationEvent,
+ event: event,
+ }
+}
+
// msg is the single concrete Message implementation; exactly one of cmd,
// metric or event is populated, selected by msgType.
type msg struct {
	msgType MsgClassification
	cmd *proto.Command
	metric *proto.MetricsReport
	event *models.EventReport
}
+
+func (m *msg) Meta() *proto.Metadata {
+ switch m.msgType {
+ case MsgClassificationCommand:
+ return m.cmd.GetMeta()
+ case MsgClassificationMetric:
+ return m.metric.GetMeta()
+ }
+
+ return nil
+}
+
+func (m *msg) Data() interface{} {
+ switch m.msgType {
+ case MsgClassificationCommand:
+ return m.cmd.GetData()
+ case MsgClassificationMetric:
+ return m.metric.GetData()
+ }
+
+ return nil
+}
+
+func (m *msg) Type() MsgType {
+ switch m.msgType {
+ case MsgClassificationCommand:
+ return m.cmd.GetType()
+ case MsgClassificationMetric:
+ return m.metric.GetType()
+ }
+
+ return nil
+}
+
// Classification reports which payload kind (command/metric/event) this message holds.
func (m *msg) Classification() MsgClassification {
	return m.msgType
}
+
+func (m *msg) Raw() interface{} {
+ switch m.msgType {
+ case MsgClassificationCommand:
+ return m.cmd
+ case MsgClassificationMetric:
+ return m.metric
+ case MsgClassificationEvent:
+ return m.event
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/metric_reporter.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/metric_reporter.go
new file mode 100644
index 000000000..06fe5c328
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/client/metric_reporter.go
@@ -0,0 +1,210 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+
+ log "github.com/sirupsen/logrus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+
+ "github.com/nginx/agent/sdk/v2"
+ sdkGRPC "github.com/nginx/agent/sdk/v2/grpc"
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/nginx/agent/sdk/v2/proto"
+ events "github.com/nginx/agent/sdk/v2/proto/events"
+)
+
+func NewMetricReporterClient() MetricReporter {
+ return &metricReporter{
+ connector: newConnector(),
+ backoffSettings: DefaultBackoffSettings,
+ }
+}
+
// metricReporter streams metrics and event reports to the server over two
// client-streaming gRPC channels.
type metricReporter struct {
	*connector // shared server address / dial options / connection state
	client proto.MetricsServiceClient
	channel proto.MetricsService_StreamClient // metrics stream
	eventsChannel proto.MetricsService_StreamEventsClient // events stream
	ctx context.Context
	mu sync.Mutex // guards (re)creation of the client and channels
	backoffSettings BackoffSettings
}
+
+func (r *metricReporter) WithInterceptor(interceptor interceptors.Interceptor) Client {
+ r.connector.interceptors = append(r.connector.interceptors, interceptor)
+
+ return r
+}
+
+func (r *metricReporter) WithClientInterceptor(interceptor interceptors.ClientInterceptor) Client {
+ r.clientInterceptors = append(r.clientInterceptors, interceptor)
+
+ return r
+}
+
+func (r *metricReporter) Connect(ctx context.Context) error {
+ log.Debugf("Metric Reporter connecting to %s", r.server)
+
+ r.ctx = ctx
+ err := sdk.WaitUntil(
+ r.ctx,
+ r.backoffSettings.initialInterval,
+ r.backoffSettings.maxInterval,
+ r.backoffSettings.maxTimeout,
+ r.createClient,
+ )
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *metricReporter) createClient() error {
+ log.Debug("Creating metric reporter client")
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ // Making sure that the previous client connection is closed before creating a new one
+ if r.grpc != nil {
+ err := r.grpc.Close()
+ if err != nil {
+ log.Warnf("Error closing old grpc connection: %v", err)
+ }
+ }
+
+ grpc, err := sdkGRPC.NewGrpcConnectionWithContext(r.ctx, r.server, r.DialOptions())
+ if err != nil {
+ log.Errorf("Unable to create client connection to %s: %s", r.server, err)
+ log.Infof("Metric reporter retrying to connect to %s", r.grpc.Target())
+ return err
+ }
+ r.grpc = grpc
+
+ r.client = proto.NewMetricsServiceClient(r.grpc)
+
+ channel, err := r.client.Stream(r.ctx)
+ if err != nil {
+ log.Warnf("Unable to create metrics channel: %s", err)
+ log.Infof("Metric reporter retrying to connect to %s", r.grpc.Target())
+ return err
+ }
+
+ eventsChannel, err := r.client.StreamEvents(r.ctx)
+ if err != nil {
+ log.Warnf("Unable to create events channel: %s", err)
+ log.Infof("Metric reporter retrying to connect to %s", r.grpc.Target())
+ return err
+ }
+
+ r.channel = channel
+ r.eventsChannel = eventsChannel
+
+ return nil
+}
+
// Close shuts down the metrics/events streams and the gRPC connection.
func (r *metricReporter) Close() (err error) {
	return r.closeConnection()
}
+
// Server returns the configured server address.
func (r *metricReporter) Server() string {
	return r.server
}
+
+func (r *metricReporter) WithServer(s string) Client {
+ r.server = s
+
+ return r
+}
+
// DialOptions returns the accumulated gRPC dial options.
func (r *metricReporter) DialOptions() []grpc.DialOption {
	return r.dialOptions
}
+
+func (r *metricReporter) WithDialOptions(options ...grpc.DialOption) Client {
+ r.dialOptions = append(r.dialOptions, options...)
+
+ return r
+}
+
+func (r *metricReporter) WithBackoffSettings(backoffSettings BackoffSettings) Client {
+ r.backoffSettings = backoffSettings
+ return r
+}
+
+func (r *metricReporter) Send(ctx context.Context, message Message) error {
+ var err error
+
+ switch message.Classification() {
+ case MsgClassificationMetric:
+ report, ok := message.Raw().(*proto.MetricsReport)
+ if !ok {
+ return fmt.Errorf("MetricReporter expected a metrics report message, but received %T", message.Data())
+ }
+ err = sdk.WaitUntil(r.ctx, r.backoffSettings.initialInterval, r.backoffSettings.maxInterval, r.backoffSettings.sendMaxTimeout, func() error {
+ if err := r.channel.Send(report); err != nil {
+ return r.handleGrpcError("Metric Reporter Channel Send", err)
+ }
+
+ log.Tracef("MetricReporter sent metrics report %v", report)
+
+ return nil
+ })
+ case MsgClassificationEvent:
+ report, ok := message.Raw().(*events.EventReport)
+ if !ok {
+ return fmt.Errorf("MetricReporter expected an events report message, but received %T", message.Data())
+ }
+ err = sdk.WaitUntil(r.ctx, r.backoffSettings.initialInterval, r.backoffSettings.maxInterval, r.backoffSettings.sendMaxTimeout, func() error {
+ if err := r.eventsChannel.Send(report); err != nil {
+ return r.handleGrpcError("Metric Reporter Events Channel Send", err)
+ }
+
+ log.Tracef("MetricReporter sent events report %v", report)
+
+ return nil
+ })
+ default:
+ return fmt.Errorf("MetricReporter expected a metrics or events report message, but received %T", message.Data())
+ }
+
+ return err
+}
+
+func (r *metricReporter) closeConnection() error {
+ err := r.channel.CloseSend()
+ if err != nil {
+ return err
+ }
+ err = r.eventsChannel.CloseSend()
+ if err != nil {
+ return err
+ }
+ return r.grpc.Close()
+}
+
+func (r *metricReporter) handleGrpcError(messagePrefix string, err error) error {
+ if st, ok := status.FromError(err); ok {
+ log.Errorf("%s: error communicating with %s, code=%s, message=%v", messagePrefix, r.grpc.Target(), st.Code().String(), st.Message())
+ } else if err == io.EOF {
+ log.Errorf("%s: server %s is not processing requests, code=%s, message=%v", messagePrefix, r.grpc.Target(), st.Code().String(), st.Message())
+ } else {
+ log.Errorf("%s: unknown grpc error while communicating with %s, %v", messagePrefix, r.grpc.Target(), err)
+ }
+
+ log.Infof("%s: retrying to connect to %s", messagePrefix, r.grpc.Target())
+ _ = r.createClient()
+
+ return err
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_apply.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_apply.go
new file mode 100644
index 000000000..59a21dd83
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_apply.go
@@ -0,0 +1,260 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package sdk
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ crossplane "github.com/nginxinc/nginx-go-crossplane"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/nginx/agent/sdk/v2/zip"
+)
+
// ConfigApply facilitates synchronizing the current config against incoming config_apply request. By keeping track
// of the current files, mark them off as they are getting applied, and delete any leftovers that's not in the incoming
// apply payload.
type ConfigApply struct {
	writer *zip.Writer // holds the pre-apply content of touched files, for Rollback
	existing map[string]struct{} // files present before the apply; survivors are deleted in Complete
	notExists map[string]struct{} // set of files that exists in the config provided payload, but not on disk
	notExistDirs map[string]struct{} // set of directories that exists in the config provided payload, but not on disk
}
+
+func NewConfigApply(
+ confFile string,
+ allowedDirectories map[string]struct{},
+) (*ConfigApply, error) {
+ w, err := zip.NewWriter("/")
+ if err != nil {
+ return nil, err
+ }
+ b := &ConfigApply{
+ writer: w,
+ existing: make(map[string]struct{}),
+ notExists: make(map[string]struct{}),
+ notExistDirs: make(map[string]struct{}),
+ }
+ if confFile != "" {
+ return b, b.mapCurrentFiles(confFile, allowedDirectories)
+ }
+ return b, nil
+}
+
// Rollback restores the pre-apply state after a failed apply: files (and
// directories) that did not exist beforehand are removed, and the saved
// content of pre-existing files is written back. Best effort - individual
// file failures are logged and the rollback continues.
func (b *ConfigApply) Rollback(cause error) error {
	log.Warnf("config_apply: rollback from cause: %s", cause)

	filesProto, err := b.writer.Proto()
	if err != nil {
		return fmt.Errorf("unrecoverable error during rollback (proto): %s", err)
	}

	r, err := zip.NewReader(filesProto)
	if err != nil {
		return fmt.Errorf("unrecoverable error during rollback (reader): %s", err)
	}

	// Remove files the apply introduced that were not present before.
	for fullPath := range b.notExists {
		err = os.Remove(fullPath)
		if err != nil {
			log.Warnf("error during rollback (remove) for %s: %s", fullPath, err)
		}
	}

	// Remove directories (recursively) created by the apply.
	for fullPath := range b.notExistDirs {
		err = os.RemoveAll(fullPath)
		if err != nil {
			log.Warnf("error during rollback (remove dir) for %s: %s", fullPath, err)
		}
	}

	// Re-write the saved content of every backed-up file. Returning true
	// from the callback keeps iterating even after a per-file error.
	r.RangeFileReaders(func(innerErr error, path string, mode os.FileMode, r io.Reader) bool {
		if innerErr != nil {
			log.Warnf("error during rollback for %s: %s", path, innerErr)
			return true
		}
		var f *os.File
		f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
		if err != nil {
			log.Warnf("error during rollback (open) for %s: %s", path, err)
			return true
		}
		// defer runs when this callback returns, so the file is closed per file.
		defer f.Close()
		_, err = io.Copy(f, r)
		if err != nil {
			log.Warnf("error during rollback (copy) for %s: %s", path, err)
			return true
		}
		log.Tracef("config_apply: rollback wrote to %s", path)
		return true
	})

	log.Info("config_apply: rollback complete")

	return nil
}
+
+// Complete deletes any leftover files in the existing list, return error if failed to do so
+func (b *ConfigApply) Complete() error {
+ log.Debugf("config_apply: complete, removing %d leftover files", len(b.existing))
+ for file := range b.existing {
+ log.Infof("config_apply: deleting %s", file)
+ if err := os.Remove(file); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
// MarkAndSave marks the provided fullPath, and save the content of the file in the provided fullPath.
// A file that exists is removed from the "existing" set (so Complete won't
// delete it) and its content is backed up for Rollback; a file that does
// not exist yet is recorded in notExists, along with the first missing
// ancestor directory in notExistDirs.
func (b *ConfigApply) MarkAndSave(fullPath string) error {
	// delete from existing list, so we don't delete them during Complete
	delete(b.existing, fullPath)

	p, err := os.Stat(fullPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			b.notExists[fullPath] = struct{}{}
			log.Debugf("backup: %s does not exist", fullPath)

			// Already covered by a recorded missing directory? Nothing to do.
			for dir := range b.notExistDirs {
				if strings.HasPrefix(fullPath, dir) {
					return nil
				}
			}

			// Walk the ancestors top-down and record the first one missing.
			// Start at i=2: for an absolute path, paths[0] is the empty
			// element before the leading "/", so paths[0:2] is the first
			// directory level (e.g. "/etc").
			paths := strings.Split(fullPath, "/")
			for i := 2; i < len(paths); i++ {
				dirPath := strings.Join(paths[0:i], "/")

				_, err := os.Stat(dirPath)
				if err != nil {
					if errors.Is(err, os.ErrNotExist) {
						b.notExistDirs[dirPath] = struct{}{}
						log.Debugf("backup: dir %s does not exist", dirPath)
						return nil
					}

					log.Warnf("backup: dir %s error: %s", dirPath, err)
					return err
				}
			}

			return nil
		}

		log.Warnf("backup: %s error: %s", fullPath, err)
		return err
	}

	// File exists: save its content (and mode) into the rollback archive.
	r, err := os.Open(fullPath)
	if err != nil {
		log.Warnf("backup: %s open error: %s", fullPath, err)
		return err
	}
	defer r.Close()
	log.Tracef("backup: %s mode=%s bytes=%d", fullPath, p.Mode(), p.Size())
	return b.writer.Add(fullPath, p.Mode(), r)
}
+
// RemoveFromNotExists forgets fullPath from the not-exists set, so Rollback
// will not delete it.
func (b *ConfigApply) RemoveFromNotExists(fullPath string) {
	delete(b.notExists, fullPath)
}
+
// mapCurrentFiles parse the provided file via cross-plane, generate a list of files, which should be identical to the
// DirectoryMap, will mark off the files as the config is being applied, any leftovers after complete should be deleted.
func (b *ConfigApply) mapCurrentFiles(confFile string, allowedDirectories map[string]struct{}) error {
	log.Debugf("parsing %s", confFile)
	payload, err := crossplane.Parse(confFile,
		&crossplane.ParseOptions{
			SingleFile:         false,
			StopParsingOnError: true,
		},
	)
	if err != nil {
		log.Debugf("failed to parse %s: %s", confFile, err)
		return err
	}
	// seen de-duplicates root directories across all included config files.
	seen := make(map[string]struct{})
	for _, xpc := range payload.Config {
		// Only track files inside the allowlisted directories.
		if !allowedPath(xpc.File, allowedDirectories) {
			continue
		}
		log.Debugf("config_apply: marking file (%s): %s", confFile, xpc.File)
		_, err = os.Stat(xpc.File)
		if err != nil {
			return fmt.Errorf("config_apply: %s read error %s", xpc.File, err)
		}
		b.existing[xpc.File] = struct{}{}
		// Also pick up files served from any "root" directive's directory.
		err = CrossplaneConfigTraverse(&xpc,
			func(parent *crossplane.Directive, directive *crossplane.Directive) (bool, error) {
				switch directive.Directive {
				case "root":
					if err = b.walkRoot(directive.Args[0], seen, allowedDirectories); err != nil {
						log.Warnf("config_apply: walk root error %s: %s", directive.Args[0], err)
						return false, err
					}
				}
				return true, nil
			})
		if err != nil {
			return err
		}
	}
	return nil
}
+
// walkRoot records every regular file under dir into b.existing, skipping
// roots already visited (via seen) and roots outside the allowed directory
// list.
func (b *ConfigApply) walkRoot(dir string, seen, allowedDirectories map[string]struct{}) error {
	if _, ok := seen[dir]; ok {
		return nil
	}
	seen[dir] = struct{}{}
	if !allowedPath(dir, allowedDirectories) {
		return nil
	}
	return filepath.WalkDir(dir,
		func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if d.IsDir() {
				return nil
			}
			// the Info call here is, so we are as close as possible to the config code
			_, err = d.Info()
			if err != nil {
				return err
			}
			b.existing[path] = struct{}{}
			return nil
		},
	)
}
+
// GetExisting returns the set of files present before the apply that have
// not yet been marked off.
func (b *ConfigApply) GetExisting() map[string]struct{} {
	return b.existing
}
+
// GetNotExists returns the set of payload files that were absent on disk.
func (b *ConfigApply) GetNotExists() map[string]struct{} {
	return b.notExists
}
+
// GetNotExistDirs returns the set of payload directories that were absent on disk.
func (b *ConfigApply) GetNotExistDirs() map[string]struct{} {
	return b.notExistDirs
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_helpers.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_helpers.go
new file mode 100644
index 000000000..9155d6523
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/config_helpers.go
@@ -0,0 +1,881 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package sdk
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/fs"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ filesSDK "github.com/nginx/agent/sdk/v2/files"
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/nginx/agent/sdk/v2/zip"
+ "github.com/nginxinc/nginx-go-crossplane"
+)
+
// DirectoryMap accumulates per-directory file listings (keyed by directory
// path) while a config is scanned, for the proto DirectoryMap payload.
type DirectoryMap struct {
	paths map[string]*proto.Directory
}
+
+func newDirectoryMap() *DirectoryMap {
+ return &DirectoryMap{make(map[string]*proto.Directory)}
+}
+
+func (dm DirectoryMap) addDirectory(dir string) error {
+ _, ok := dm.paths[dir]
+ if !ok {
+ info, err := os.Stat(dir)
+ if err != nil {
+ return fmt.Errorf("configs: could not read dir info(%s): %s", dir, err)
+ }
+
+ directory := &proto.Directory{
+ Name: dir,
+ Mtime: filesSDK.TimeConvert(info.ModTime()),
+ Permissions: filesSDK.GetPermissions(info.Mode()),
+ Size_: info.Size(),
+ Files: make([]*proto.File, 0),
+ }
+
+ dm.paths[dir] = directory
+ }
+ return nil
+}
+
+func (dm DirectoryMap) appendFile(dir string, info fs.FileInfo) error {
+ fileProto := &proto.File{
+ Name: info.Name(),
+ Mtime: filesSDK.TimeConvert(info.ModTime()),
+ Permissions: filesSDK.GetPermissions(info.Mode()),
+ Size_: info.Size(),
+ }
+
+ return dm.appendFileWithProto(dir, fileProto)
+}
+
+func (dm DirectoryMap) appendFileWithProto(dir string, fileProto *proto.File) error {
+ _, ok := dm.paths[dir]
+ if !ok {
+ err := dm.addDirectory(dir)
+
+ if err != nil {
+ return err
+ }
+ }
+
+ dm.paths[dir].Files = append(dm.paths[dir].Files, fileProto)
+
+ return nil
+}
+
// GetNginxConfig parses confFile with crossplane into a proto.NginxConfig
// payload, using the provided nginxId and systemId for the ConfigDescriptor.
// allowedDirectories allowlists the directories whose contents may be
// included in the aux payload.
func GetNginxConfig(
	confFile,
	nginxId,
	systemId string,
	allowedDirectories map[string]struct{},
) (*proto.NginxConfig, error) {
	payload, err := crossplane.Parse(confFile,
		&crossplane.ParseOptions{
			SingleFile:         false,
			StopParsingOnError: true,
		},
	)
	if err != nil {
		return nil, fmt.Errorf("error reading config from %s, error: %s", confFile, err)
	}

	// Start from an empty skeleton; updateNginxConfigFromPayload fills in
	// the zipped config, aux files, logs, certs and directory map.
	nginxConfig := &proto.NginxConfig{
		Action: proto.NginxConfigAction_RETURN,
		ConfigData: &proto.ConfigDescriptor{
			NginxId:  nginxId,
			SystemId: systemId,
		},
		Zconfig:      nil,
		Zaux:         nil,
		AccessLogs:   &proto.AccessLogs{AccessLog: make([]*proto.AccessLog, 0)},
		ErrorLogs:    &proto.ErrorLogs{ErrorLog: make([]*proto.ErrorLog, 0)},
		Ssl:          &proto.SslCertificates{SslCerts: make([]*proto.SslCertificate, 0)},
		DirectoryMap: &proto.DirectoryMap{Directories: make([]*proto.Directory, 0)},
	}

	err = updateNginxConfigFromPayload(confFile, payload, nginxConfig, allowedDirectories)
	if err != nil {
		return nil, fmt.Errorf("error assemble payload from %s, error: %s", confFile, err)
	}

	return nginxConfig, nil
}
+
// updateNginxConfigFromPayload fills nginxConfig from the crossplane
// payload: zips the config files (main file first, the rest sorted for a
// deterministic archive), collects aux files/logs/certs per file, and
// finally rebuilds the directory map.
func updateNginxConfigFromPayload(
	confFile string,
	payload *crossplane.Payload,
	nginxConfig *proto.NginxConfig,
	allowedDirectories map[string]struct{},
) error {
	conf, err := zip.NewWriter(filepath.Dir(confFile))
	if err != nil {
		return fmt.Errorf("configs: could not create zip writer: %s", err)
	}
	aux, err := zip.NewWriter(filepath.Dir(confFile))
	if err != nil {
		return fmt.Errorf("configs: could not create auxillary zip writer: %s", err)
	}

	// cache the directory map, so we can look up using the base
	directoryMap := newDirectoryMap()
	formatMap := map[string]string{}  // map of accessLog/errorLog formats
	seen := make(map[string]struct{}) // local cache of seen files

	// Add files to the zipped config in a consistent order: the main conf
	// file (payload.Config[0]) first, then the includes sorted by path.
	if err = conf.AddFile(payload.Config[0].File); err != nil {
		return fmt.Errorf("configs: could not add conf(%s): %v", payload.Config[0].File, err)
	}

	rest := make([]crossplane.Config, len(payload.Config[1:]))
	copy(rest, payload.Config[1:])
	sort.Slice(rest, func(i, j int) bool {
		return rest[i].File < rest[j].File
	})
	for _, xpConf := range rest {
		if err = conf.AddFile(xpConf.File); err != nil {
			return fmt.Errorf("configs could not add conf file to archive: %s", err)
		}
	}

	// all files in the payload are config files
	var info fs.FileInfo
	for _, xpConf := range payload.Config {
		base := filepath.Dir(xpConf.File)

		info, err = os.Stat(xpConf.File)
		if err != nil {
			return fmt.Errorf("configs: could not read file info(%s): %s", xpConf.File, err)
		}

		if err := directoryMap.appendFile(base, info); err != nil {
			return err
		}

		// Walk this file's directives for logs, certs, and root directories.
		err = updateNginxConfigFileConfig(xpConf, nginxConfig, filepath.Dir(confFile), aux, formatMap, seen, allowedDirectories, directoryMap)
		if err != nil {
			return fmt.Errorf("configs: failed to update nginx config: %s", err)
		}
	}

	nginxConfig.Zconfig, err = conf.Proto()
	if err != nil {
		return fmt.Errorf("configs: failed to get conf proto: %s", err)
	}

	// Only attach an aux archive when something was actually added.
	if aux.FileLen() > 0 {
		nginxConfig.Zaux, err = aux.Proto()
		if err != nil {
			return fmt.Errorf("configs: failed to get aux proto: %s", err)
		}
	}

	setDirectoryMap(directoryMap, nginxConfig)

	return nil
}
+
+func setDirectoryMap(directories *DirectoryMap, nginxConfig *proto.NginxConfig) {
+ // empty the DirectoryMap first
+ nginxConfig.DirectoryMap.Directories = nginxConfig.DirectoryMap.Directories[:0]
+ keys := make([]string, 0, len(directories.paths))
+ for k := range directories.paths {
+ keys = append(keys, k)
+ }
+
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ nginxConfig.DirectoryMap.Directories = append(nginxConfig.DirectoryMap.Directories, directories.paths[k])
+ }
+}
+
+func updateNginxConfigFileConfig(
+ conf crossplane.Config,
+ nginxConfig *proto.NginxConfig,
+ hostDir string,
+ aux *zip.Writer,
+ formatMap map[string]string,
+ seen map[string]struct{},
+ allowedDirectories map[string]struct{},
+ directoryMap *DirectoryMap,
+) error {
+ err := CrossplaneConfigTraverse(&conf,
+ func(parent *crossplane.Directive, directive *crossplane.Directive) (bool, error) {
+ switch directive.Directive {
+ case "log_format":
+ if len(directive.Args) >= 2 {
+ formatMap[directive.Args[0]] = strings.Join(directive.Args[1:], "")
+ }
+ case "root":
+ if err := updateNginxConfigFileWithRoot(aux, directive.Args[0], seen, allowedDirectories, directoryMap); err != nil {
+ return true, err
+ }
+ case "ssl_certificate", "ssl_trusted_certificate":
+ if err := updateNginxConfigWithCert(directive.Directive, directive.Args[0], nginxConfig, aux, hostDir, directoryMap, allowedDirectories); err != nil {
+ return true, err
+ }
+ case "access_log":
+ updateNginxConfigWithAccessLog(
+ directive.Args[0],
+ getAccessLogDirectiveFormat(directive),
+ nginxConfig, formatMap, seen)
+ case "error_log":
+ updateNginxConfigWithErrorLog(
+ directive.Args[0],
+ getErrorLogDirectiveLevel(directive),
+ nginxConfig, seen)
+ }
+ return true, nil
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func updateNginxConfigWithCert(
+ directive string,
+ file string,
+ nginxConfig *proto.NginxConfig,
+ aux *zip.Writer,
+ rootDir string,
+ directoryMap *DirectoryMap,
+ allowedDirectories map[string]struct{},
+) error {
+ if strings.HasPrefix("$", file) {
+ // variable loading, not actual cert file
+ return nil
+ }
+
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(rootDir, file)
+ }
+ info, err := os.Stat(file)
+ if err != nil {
+ return err
+ }
+
+ isAllowed := false
+ for dir := range allowedDirectories {
+ if strings.HasPrefix(file, dir) {
+ isAllowed = true
+ break
+ }
+ }
+
+ if directive == "ssl_certificate" {
+ cert, err := LoadCertificate(file)
+ if err != nil {
+ return fmt.Errorf("configs: could not load cert(%s): %s", file, err)
+ }
+
+ fingerprint := sha256.Sum256(cert.Raw)
+ certProto := &proto.SslCertificate{
+ FileName: file,
+ PublicKeyAlgorithm: cert.PublicKeyAlgorithm.String(),
+ SignatureAlgorithm: cert.SignatureAlgorithm.String(),
+ Issuer: &proto.CertificateName{
+ CommonName: cert.Issuer.CommonName,
+ Country: cert.Issuer.Country,
+ Locality: cert.Issuer.Locality,
+ Organization: cert.Issuer.Organization,
+ OrganizationalUnit: cert.Issuer.OrganizationalUnit,
+ },
+ Subject: &proto.CertificateName{
+ CommonName: cert.Subject.CommonName,
+ Country: cert.Subject.Country,
+ Locality: cert.Subject.Locality,
+ Organization: cert.Subject.Organization,
+ OrganizationalUnit: cert.Subject.OrganizationalUnit,
+ State: cert.Subject.Province,
+ },
+ Validity: &proto.CertificateDates{
+ NotBefore: cert.NotBefore.Unix(),
+ NotAfter: cert.NotAfter.Unix(),
+ },
+ SubjAltNames: cert.DNSNames,
+ SerialNumber: cert.SerialNumber.String(),
+ OcspUrl: cert.IssuingCertificateURL,
+ SubjectKeyIdentifier: convertToHexFormat(hex.EncodeToString(cert.SubjectKeyId)),
+ Fingerprint: convertToHexFormat(hex.EncodeToString(fingerprint[:])),
+ FingerprintAlgorithm: cert.SignatureAlgorithm.String(),
+ Version: int64(cert.Version),
+ AuthorityKeyIdentifier: convertToHexFormat(hex.EncodeToString(cert.AuthorityKeyId)),
+ }
+ certProto.Mtime = filesSDK.TimeConvert(info.ModTime())
+ certProto.Size_ = info.Size()
+
+ nginxConfig.Ssl.SslCerts = append(nginxConfig.Ssl.SslCerts, certProto)
+ }
+
+ if !isAllowed {
+ log.Infof("certs: %s outside allowed directory, not including in aux payloads", file)
+ // we want the meta information, but skip putting the files into the aux contents
+ return nil
+ }
+ if err := directoryMap.appendFile(filepath.Dir(file), info); err != nil {
+ return err
+ }
+
+ if err := aux.AddFile(file); err != nil {
+ return fmt.Errorf("configs: could not add cert to aux file writer: %s", err)
+ }
+
+ return nil
+}
+
+func getAccessLogDirectiveFormat(directive *crossplane.Directive) string {
+ var format string
+ if len(directive.Args) >= 2 {
+ format = strings.ReplaceAll(directive.Args[1], "$", "")
+ }
+ return format
+}
+
+func getErrorLogDirectiveLevel(directive *crossplane.Directive) string {
+ if len(directive.Args) >= 2 {
+ return directive.Args[1]
+ }
+ return ""
+}
+
+func updateNginxConfigWithAccessLog(file string, format string, nginxConfig *proto.NginxConfig, formatMap map[string]string, seen map[string]struct{}) {
+ if _, ok := seen[file]; ok {
+ return
+ }
+
+ al := &proto.AccessLog{
+ Name: file,
+ Readable: false,
+ }
+
+ info, err := os.Stat(file)
+ if err == nil {
+ // survivable error
+ al.Readable = true
+ al.Permissions = filesSDK.GetPermissions(info.Mode())
+ }
+
+ if formatMap[format] != "" {
+ al.Format = formatMap[format]
+ } else {
+ al.Format = format
+ }
+
+ nginxConfig.AccessLogs.AccessLog = append(nginxConfig.AccessLogs.AccessLog, al)
+ seen[file] = struct{}{}
+}
+
+func updateNginxConfigWithAccessLogPath(file string, nginxConfig *proto.NginxConfig, seen map[string]struct{}) {
+ if _, ok := seen[file]; ok {
+ return
+ }
+ al := &proto.AccessLog{
+ Name: file,
+ }
+
+ nginxConfig.AccessLogs.AccessLog = append(nginxConfig.AccessLogs.AccessLog, al)
+ seen[file] = struct{}{}
+}
+
+func updateNginxConfigWithErrorLog(
+ file string,
+ level string,
+ nginxConfig *proto.NginxConfig,
+ seen map[string]struct{},
+) {
+ if _, ok := seen[file]; ok {
+ return
+ }
+ el := &proto.ErrorLog{
+ Name: file,
+ LogLevel: level,
+ Readable: false,
+ }
+ info, err := os.Stat(file)
+ if err == nil {
+ // survivable error
+ el.Permissions = filesSDK.GetPermissions(info.Mode())
+ el.Readable = true
+ }
+
+ nginxConfig.ErrorLogs.ErrorLog = append(nginxConfig.ErrorLogs.ErrorLog, el)
+ seen[file] = struct{}{}
+}
+
+func updateNginxConfigWithErrorLogPath(
+ file string,
+ nginxConfig *proto.NginxConfig,
+ seen map[string]struct{},
+) {
+ if _, ok := seen[file]; ok {
+ return
+ }
+ el := &proto.ErrorLog{
+ Name: file,
+ }
+ nginxConfig.ErrorLogs.ErrorLog = append(nginxConfig.ErrorLogs.ErrorLog, el)
+ seen[file] = struct{}{}
+}
+
// updateNginxConfigFileWithRoot handles a "root" directive: every file
// under dir is added to the aux archive and the directory map. Directories
// outside the allowed list, and paths already seen, are skipped.
func updateNginxConfigFileWithRoot(
	aux *zip.Writer,
	dir string,
	seen map[string]struct{},
	allowedDirectories map[string]struct{},
	directoryMap *DirectoryMap,
) error {
	if _, ok := seen[dir]; ok {
		return nil
	}
	seen[dir] = struct{}{}
	if !allowedPath(dir, allowedDirectories) {
		log.Debugf("Directory %s, is not in the allowed directory list so it will be excluded. Please add the directory to config_dirs in nginx-agent.conf", dir)
		return nil
	}

	return filepath.WalkDir(dir,
		func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}

			// De-duplicate every visited path, not just the root.
			if _, ok := seen[path]; ok {
				return nil
			}
			seen[path] = struct{}{}

			if d.IsDir() {
				if err := directoryMap.addDirectory(path); err != nil {
					return err
				}
				return nil
			}

			var info fs.FileInfo
			info, err = d.Info()
			if err != nil {
				return err
			}
			reader, err := os.Open(path)
			if err != nil {
				return fmt.Errorf("could read file(%s): %s", path, err)
			}
			defer reader.Close()

			if err := directoryMap.appendFile(filepath.Dir(path), info); err != nil {
				return err
			}

			if err = aux.Add(path, info.Mode(), reader); err != nil {
				return fmt.Errorf("adding auxillary file error: %s", err)
			}

			return nil
		},
	)
}
+
+func updateNginxConfigFileWithAuxFile(
+ aux *zip.Writer,
+ file string,
+ config *proto.NginxConfig,
+ seen map[string]struct{},
+ allowedDirectories map[string]struct{},
+ directoryMap *DirectoryMap,
+ okIfFileNotExist bool,
+) error {
+ if _, ok := seen[file]; ok {
+ return nil
+ }
+ if !allowedPath(file, allowedDirectories) {
+ log.Warnf("Unable to retrieve the NAP aux file %s as it is not in the allowed directory list. Please add the directory to config_dirs in nginx-agent.conf.", file)
+ return nil
+ }
+
+ info, err := os.Stat(file)
+ if err != nil {
+ if okIfFileNotExist {
+ log.Debugf("Unable to retrieve the aux file %s.", file)
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ if err := directoryMap.appendFile(filepath.Dir(file), info); err != nil {
+ return err
+ }
+
+ if err := aux.AddFile(file); err != nil {
+ return err
+ }
+ seen[file] = struct{}{}
+ return nil
+}
+
+func GetNginxConfigFiles(config *proto.NginxConfig) (confFiles, auxFiles []*proto.File, err error) {
+ if config.GetZconfig() == nil {
+ return nil, nil, errors.New("config is empty")
+ }
+
+ confFiles, err = zip.UnPack(config.GetZconfig())
+ if err != nil {
+ return nil, nil, fmt.Errorf("unpack zipped config error: %s", err)
+ }
+
+ if aux := config.GetZaux(); aux != nil && len(aux.Contents) > 0 {
+ auxFiles, err = zip.UnPack(aux)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unpack zipped auxiliary error: %s", err)
+ }
+ }
+ return confFiles, auxFiles, nil
+}
+
+// AddAuxfileToNginxConfig adds the specified newAuxFile to the Nginx Config cfg
+func AddAuxfileToNginxConfig(
+ confFile string,
+ cfg *proto.NginxConfig,
+ newAuxFile string,
+ allowedDirectories map[string]struct{},
+ okIfFileNotExist bool,
+) (*proto.NginxConfig, error) {
+ directoryMap := newDirectoryMap()
+ for _, d := range cfg.DirectoryMap.Directories {
+ for _, f := range d.Files {
+ err := directoryMap.appendFileWithProto(d.Name, f)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ _, auxFiles, err := GetNginxConfigFiles(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ aux, err := zip.NewWriter(filepath.Dir(confFile))
+ if err != nil {
+ return nil, fmt.Errorf("configs: could not create auxillary zip writer: %s", err)
+ }
+
+ seen := make(map[string]struct{})
+ for _, file := range auxFiles {
+ seen[file.Name] = struct{}{}
+ err = aux.AddFile(file.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // add the aux file
+ err = updateNginxConfigFileWithAuxFile(aux, newAuxFile, cfg, seen, allowedDirectories, directoryMap, okIfFileNotExist)
+ if err != nil {
+ return nil, fmt.Errorf("configs: failed to update nginx app protect metadata file: %s", err)
+ }
+
+ if aux.FileLen() > 0 {
+ cfg.Zaux, err = aux.Proto()
+ if err != nil {
+ log.Errorf("configs: failed to get aux proto: %s", err)
+ return nil, err
+ }
+ }
+
+ setDirectoryMap(directoryMap, cfg)
+
+ return cfg, nil
+}
+
// Directive names and URL template used when probing for the NGINX status
// APIs.
const (
	plusAPIDirective = "api"         // NGINX Plus api directive
	ossAPIDirective  = "stub_status" // NGINX OSS stub_status directive
	apiFormat        = "http://%s%s" // "<host:port><location path>"
)
+
+func parseStatusAPIEndpoints(parent *crossplane.Directive, current *crossplane.Directive) ([]string, []string) {
+ var plusUrls []string
+ var ossUrls []string
+ // process from the location block
+ if current.Directive != "location" {
+ return plusUrls, ossUrls
+ }
+
+ for _, locChild := range current.Block {
+ if locChild.Directive != plusAPIDirective && locChild.Directive != ossAPIDirective {
+ continue
+ }
+ host := parseServerHost(parent)
+ path := parseLocationPath(current)
+ switch locChild.Directive {
+ case plusAPIDirective:
+ plusUrls = append(plusUrls, fmt.Sprintf(apiFormat, host, path))
+ case ossAPIDirective:
+ ossUrls = append(ossUrls, fmt.Sprintf(apiFormat, host, path))
+ }
+ }
+ return plusUrls, ossUrls
+}
+
+func parseServerHost(parent *crossplane.Directive) string {
+ listenPort := "80"
+ serverName := "localhost"
+ for _, dir := range parent.Block {
+ switch dir.Directive {
+ case "listen":
+ host, port, err := net.SplitHostPort(dir.Args[0])
+ if err == nil {
+ if host != "*" && host != "::" {
+ serverName = host
+ }
+ listenPort = port
+ } else {
+ if isPort(dir.Args[0]) {
+ listenPort = dir.Args[0]
+ } else {
+ serverName = dir.Args[0]
+ }
+ }
+ case "server_name":
+ if dir.Args[0] == "_" {
+ // default server
+ continue
+ }
+ serverName = dir.Args[0]
+ }
+ }
+ return fmt.Sprintf("%s:%s", serverName, listenPort)
+}
+
// isPort reports whether value is a decimal TCP/UDP port number (1-65535).
func isPort(value string) bool {
	n, err := strconv.Atoi(value)
	if err != nil {
		return false
	}
	return n >= 1 && n <= 65535
}
+
+func parseLocationPath(location *crossplane.Directive) string {
+ path := "/"
+ if len(location.Args) > 0 {
+ path = location.Args[0]
+ }
+ return path
+}
+
+func statusAPICallback(parent *crossplane.Directive, current *crossplane.Directive) string {
+ plusUrls, ossUrls := parseStatusAPIEndpoints(parent, current)
+
+ for _, url := range plusUrls {
+ if pingStatusAPIEndpoint(url) {
+ log.Debugf("api at %q found", url)
+ return url
+ }
+ log.Debugf("api at %q is not reachable", url)
+ }
+
+ for _, url := range ossUrls {
+ if pingStatusAPIEndpoint(url) {
+ log.Debugf("stub_status at %q found", url)
+ return url
+ }
+ log.Debugf("stub_status at %q is not reachable", url)
+ }
+
+ return ""
+}
+
// pingStatusAPIEndpoint ensures the statusAPI is reachable from the agent:
// it sends a HEAD request with a short (50ms) timeout and reports success.
func pingStatusAPIEndpoint(statusAPI string) bool {
	client := http.Client{Timeout: 50 * time.Millisecond}

	resp, err := client.Head(statusAPI)
	if err != nil {
		return false
	}
	// Bug fix: the response body was previously discarded without being
	// closed, leaking the connection; close it so the transport can reuse it.
	defer resp.Body.Close()
	return true
}
+
+func GetStatusApiInfo(confFile string) (statusApi string, err error) {
+ payload, err := crossplane.Parse(confFile,
+ &crossplane.ParseOptions{
+ SingleFile: false,
+ StopParsingOnError: true,
+ CombineConfigs: true,
+ },
+ )
+ if err != nil {
+ return "", fmt.Errorf("error reading config from %s, error: %s", confFile, err)
+ }
+
+ for _, xpConf := range payload.Config {
+ statusApi = CrossplaneConfigTraverseStr(&xpConf, statusAPICallback)
+ if statusApi != "" {
+ return statusApi, nil
+ }
+ }
+ return "", errors.New("no status api reachable from the agent found")
+}
+
+func GetErrorAndAccessLogs(confFile string) (*proto.ErrorLogs, *proto.AccessLogs, error) {
+ nginxConfig := &proto.NginxConfig{
+ Action: proto.NginxConfigAction_RETURN,
+ ConfigData: nil,
+ Zconfig: nil,
+ Zaux: nil,
+ AccessLogs: &proto.AccessLogs{AccessLog: make([]*proto.AccessLog, 0)},
+ ErrorLogs: &proto.ErrorLogs{ErrorLog: make([]*proto.ErrorLog, 0)},
+ Ssl: &proto.SslCertificates{SslCerts: make([]*proto.SslCertificate, 0)},
+ DirectoryMap: &proto.DirectoryMap{Directories: make([]*proto.Directory, 0)},
+ }
+
+ payload, err := crossplane.Parse(confFile,
+ &crossplane.ParseOptions{
+ SingleFile: false,
+ StopParsingOnError: true,
+ },
+ )
+ if err != nil {
+ return nginxConfig.ErrorLogs, nginxConfig.AccessLogs, err
+ }
+
+ seen := make(map[string]struct{})
+ for _, xpConf := range payload.Config {
+ var err error
+ err = CrossplaneConfigTraverse(&xpConf,
+ func(parent *crossplane.Directive, current *crossplane.Directive) (bool, error) {
+ switch current.Directive {
+ case "access_log":
+ updateNginxConfigWithAccessLogPath(current.Args[0], nginxConfig, seen)
+ case "error_log":
+ updateNginxConfigWithErrorLogPath(current.Args[0], nginxConfig, seen)
+ }
+ return true, nil
+ })
+ return nginxConfig.ErrorLogs, nginxConfig.AccessLogs, err
+ }
+ return nginxConfig.ErrorLogs, nginxConfig.AccessLogs, err
+}
+
+func GetErrorLogs(errorLogs *proto.ErrorLogs) []string {
+ result := []string{}
+ for _, log := range errorLogs.ErrorLog {
+ result = append(result, log.Name)
+ }
+ return result
+}
+
+func GetAccessLogs(accessLogs *proto.AccessLogs) []string {
+ result := []string{}
+ for _, log := range accessLogs.AccessLog {
+ result = append(result, log.Name)
+ }
+ return result
+}
+
// allowedPath returns true if the provided path (a file path or directory)
// has a prefix in allowedDirectories, false otherwise.
func allowedPath(path string, allowedDirectories map[string]struct{}) bool {
	allowed := false
	for prefix := range allowedDirectories {
		if strings.HasPrefix(path, prefix) {
			allowed = true
			break
		}
	}
	return allowed
}
+
// convertToHexFormat upper-cases hexString and inserts a ':' between every
// byte (pair of hex digits), e.g. "abcdef" -> "AB:CD:EF". An odd trailing
// digit is kept as-is ("abc" -> "AB:C").
func convertToHexFormat(hexString string) string {
	upper := strings.ToUpper(hexString)
	// Idiom/perf fix: the original built the result with string += in a
	// loop (quadratic); use a pre-sized strings.Builder instead.
	var b strings.Builder
	b.Grow(len(upper) + len(upper)/2)
	for i := 0; i < len(upper); i++ {
		if i > 0 && i%2 == 0 {
			b.WriteByte(':')
		}
		b.WriteByte(upper[i])
	}
	return b.String()
}
+
+func GetAppProtectPolicyAndSecurityLogFiles(cfg *proto.NginxConfig) ([]string, []string) {
+ policyMap := make(map[string]bool)
+ profileMap := make(map[string]bool)
+
+ for _, directory := range cfg.GetDirectoryMap().GetDirectories() {
+ for _, file := range directory.GetFiles() {
+ confFile := path.Join(directory.GetName(), file.GetName())
+
+ payload, err := crossplane.Parse(confFile,
+ &crossplane.ParseOptions{
+ SingleFile: false,
+ StopParsingOnError: true,
+ },
+ )
+
+ if err != nil {
+ continue
+ }
+
+ for _, conf := range payload.Config {
+ err = CrossplaneConfigTraverse(&conf,
+ func(parent *crossplane.Directive, directive *crossplane.Directive) (bool, error) {
+ switch directive.Directive {
+ case "app_protect_policy_file":
+ if len(directive.Args) == 1 {
+ _, policy := path.Split(directive.Args[0])
+ policyMap[policy] = true
+ }
+ case "app_protect_security_log":
+ if len(directive.Args) == 2 {
+ _, profile := path.Split(directive.Args[0])
+ profileMap[profile] = true
+ }
+ }
+ return true, nil
+ })
+ if err != nil {
+ continue
+ }
+ }
+ if err != nil {
+ continue
+ }
+ }
+ }
+ policies := []string{}
+ for policy := range policyMap {
+ policies = append(policies, policy)
+ }
+
+ profiles := []string{}
+ for profile := range profileMap {
+ profiles = append(profiles, profile)
+ }
+
+ return policies, profiles
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/files/file_helpers.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/files/file_helpers.go
new file mode 100644
index 000000000..856483bf0
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/files/file_helpers.go
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package files
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/gogo/protobuf/types"
+)
+
// GetFileMode converts an octal permission string (e.g. "0644") into an
// os.FileMode. Unparseable input falls back to 0644.
func GetFileMode(mode string) os.FileMode {
	parsed, parseErr := strconv.ParseInt(mode, 8, 32)
	if parseErr != nil {
		return os.FileMode(0644)
	}
	return os.FileMode(parsed)
}
+
// GetPermissions renders the permission bits of fileMode as a zero-prefixed
// octal string (e.g. "0644"); any file-type bits are stripped via Perm().
func GetPermissions(fileMode os.FileMode) string {
	perm := fileMode.Perm()
	return fmt.Sprintf("%#o", perm)
}
+
+func TimeConvert(t time.Time) *types.Timestamp {
+ ts, err := types.TimestampProto(t)
+ if err != nil {
+ return types.TimestampNow()
+ }
+ return ts
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/conts.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/conts.go
new file mode 100644
index 000000000..9b77cf22f
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/conts.go
@@ -0,0 +1,12 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package grpc
+
const (
	// DefaultContentChunkSize is the default chunk size (4 KiB) used when
	// splitting content for transfer over gRPC.
	DefaultContentChunkSize = 4 * 1024
)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/grpc.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/grpc.go
new file mode 100644
index 000000000..28529f052
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/grpc.go
@@ -0,0 +1,193 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package grpc
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
+ grpc_validator "github.com/grpc-ecosystem/go-grpc-middleware/validator"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/nginx/agent/sdk/v2/proto"
+)
+
// clientAuth bundles the identifiers used to authenticate an outbound
// dataplane connection.
type clientAuth struct {
	UUID  string // unique client (agent) identifier
	Token string // auth/bearer token presented to the server
}
+
var (
	// DefaultClientDialOptions are default settings for a connection to the dataplane
	DefaultClientDialOptions = []grpc.DialOption{
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor()),
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                120 * time.Second, // ping after 120s of inactivity
			Timeout:             60 * time.Second,  // wait up to 60s for a ping ack
			PermitWithoutStream: true,
		}),
	}

	// DefaultServerDialOptions are the default server-side options: request
	// validation interceptors plus a keepalive enforcement policy.
	DefaultServerDialOptions = []grpc.ServerOption{
		grpc.ChainUnaryInterceptor(grpc_validator.UnaryServerInterceptor()),
		grpc.StreamInterceptor(grpc_validator.StreamServerInterceptor()),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             60 * time.Second, // reject pings more frequent than 60s
			PermitWithoutStream: true,
		}),
	}
)
+
+// NewGrpcConnection -
+func NewGrpcConnection(target string, dialOptions []grpc.DialOption) (*grpc.ClientConn, error) {
+ if dialOptions == nil {
+ dialOptions = DefaultClientDialOptions
+ }
+
+ return NewGrpcConnectionWithContext(context.TODO(), target, dialOptions)
+}
+
+// NewGrpcConnectionWithContext -
+func NewGrpcConnectionWithContext(ctx context.Context, server string, dialOptions []grpc.DialOption) (*grpc.ClientConn, error) {
+ if dialOptions == nil {
+ dialOptions = DefaultClientDialOptions
+ }
+
+ return grpc.DialContext(ctx, server, dialOptions...)
+}
+
+// SecureDialOptions returns dialOptions with tls support
+func SecureDialOptions(tlsEnabled bool, certPath string, keyPath string, caPath string, serverName string, skipVerify bool) (grpc.DialOption, error) {
+ if !tlsEnabled {
+ return grpc.WithTransportCredentials(insecure.NewCredentials()), nil
+ }
+ transCreds, err := getTransportCredentials(certPath, keyPath, caPath, serverName, skipVerify)
+ if err != nil {
+ return nil, fmt.Errorf("failed to configure tls: %w", err)
+ }
+ return grpc.WithTransportCredentials(transCreds), nil
+}
+
+// DataplaneConnectionDialOptions returns dialOptions for connecting to a dataplane instance
+func DataplaneConnectionDialOptions(Token string, meta *proto.Metadata) []grpc.DialOption {
+ dataplaneDialOptions := []grpc.DialOption{}
+ if Token != "" {
+ c := &clientAuth{UUID: meta.GetClientId(), Token: Token}
+
+ authInterceptor := interceptors.NewClientAuth(c.UUID, c.Token, []interceptors.Option{
+ interceptors.WithBearerToken(c.Token),
+ }...)
+
+ dataplaneDialOptions = []grpc.DialOption{
+ grpc.WithStreamInterceptor(authInterceptor.Stream()),
+ grpc.WithUnaryInterceptor(authInterceptor.Unary()),
+ }
+ }
+ return dataplaneDialOptions
+}
+
+// GetCallOptions -
+func GetCallOptions() []grpc.CallOption {
+ callOptions := []grpc.CallOption{
+ grpc_retry.WithCodes(codes.NotFound),
+ grpc.WaitForReady(true),
+ }
+ return callOptions
+}
+
// GetCommandClient wraps the given gRPC connection in a CommanderClient used
// to exchange commands with the management plane.
func GetCommandClient(conn *grpc.ClientConn) proto.CommanderClient {
	return proto.NewCommanderClient(conn)
}
+
+// GetCommandChannel returns a channel that commands are sent over
+func GetCommandChannel(client proto.CommanderClient) (proto.Commander_CommandChannelClient, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ return client.CommandChannel(ctx, GetCallOptions()...)
+}
+
+func getTransportCredentials(certPath string, keyPath string, caPath string, serverName string, skipVerify bool) (credentials.TransportCredentials, error) {
+ tlsConfig := &tls.Config{
+ // note: ServerName is ignored if InsecureSkipVerify is true
+ ServerName: serverName,
+ InsecureSkipVerify: skipVerify,
+ }
+
+ err := appendRootCAs(tlsConfig, caPath)
+ if err != nil {
+ return nil, err
+ }
+
+ err = appendCertKeyPair(tlsConfig, certPath, keyPath)
+ if err != nil {
+ return nil, err
+ }
+
+ return credentials.NewTLS(tlsConfig), nil
+}
+
// appendRootCAs will read, parse, and append any certificates found in the
// file at caFile to the RootCAs in the provided tls Config. If no filepath
// is provided the tls Config is unmodified. By default, if there are no RootCAs
// in the tls Config, it will automatically use the host OS's CA pool.
func appendRootCAs(tlsConfig *tls.Config, caFile string) error {
	if caFile == "" {
		return nil
	}

	pemBytes, err := ioutil.ReadFile(caFile)
	if err != nil {
		return fmt.Errorf("could not read CA file (%s): %w", caFile, err)
	}

	// If CAs have already been set, append to existing
	pool := tlsConfig.RootCAs
	if pool == nil {
		pool = x509.NewCertPool()
	}
	if !pool.AppendCertsFromPEM(pemBytes) {
		return fmt.Errorf("could not parse CA cert (%s)", caFile)
	}

	tlsConfig.RootCAs = pool
	return nil
}
+
// appendCertKeyPair will attempt to load a cert and key pair from the provided
// filepaths and append to the Certificates list in the provided tls Config. If
// no files are provided the tls Config is unmodified. If only one file (key or
// cert) is provided, an error is produced.
func appendCertKeyPair(tlsConfig *tls.Config, certFile string, keyFile string) error {
	switch {
	case certFile == "" && keyFile == "":
		return nil
	case certFile == "" || keyFile == "":
		return fmt.Errorf("cert and key must both be provided")
	}

	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return fmt.Errorf("could not load X509 keypair: %w", err)
	}

	tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
	return nil
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/meta.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/meta.go
new file mode 100644
index 000000000..1d0ea2b53
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/grpc/meta.go
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package grpc
+
+import (
+ "github.com/gogo/protobuf/types"
+
+ sdk "github.com/nginx/agent/sdk/v2/proto"
+)
+
var (
	// meta is the package-level identity template populated by InitMeta and
	// copied into every message created by NewMessageMeta.
	meta = &sdk.Metadata{}
)
+
+func InitMeta(clientID, cloudAccountID string) {
+ meta.ClientId = clientID
+ meta.CloudAccountId = cloudAccountID
+}
+
+func NewMessageMeta(messageID string) *sdk.Metadata {
+ return &sdk.Metadata{
+ Timestamp: types.TimestampNow(),
+ ClientId: meta.ClientId,
+ CloudAccountId: meta.CloudAccountId,
+ MessageId: messageID,
+ }
+}
+
+// NewMeta returns a new Metadata struct defined in the sdk/proto folder
+func NewMeta(clientID, messageID, cloudID string) *sdk.Metadata {
+ return &sdk.Metadata{
+ Timestamp: types.TimestampNow(),
+ ClientId: clientID,
+ MessageId: messageID,
+ CloudAccountId: cloudID,
+ }
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/client.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/client.go
new file mode 100644
index 000000000..93397f80d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/client.go
@@ -0,0 +1,116 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package interceptors
+
+import (
+ "context"
+
+ log "github.com/sirupsen/logrus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
// Metadata keys attached to every outbound RPC by the client interceptor.
const (
	// TokenHeader in outbound metadata for an authorization token
	TokenHeader = "authorization"
	// IDHeader in an outbound metadata for a client ID
	IDHeader = "uuid"
	// BearerHeader in an outbound metadata for a bearer token (typically a JWT)
	BearerHeader = "bearer"
)
+
// clientInterceptor attaches authentication metadata (token, uuid, bearer)
// to outbound gRPC calls and logs each intercepted method.
type clientInterceptor struct {
	log    *log.Logger // logger; defaults to log.New() in NewClientAuth
	uuid   string      // client ID, sent under IDHeader
	token  string      // auth token, sent under TokenHeader
	bearer string      // bearer token (typically a JWT), sent under BearerHeader
}
+
+// NewClientAuth for outbound authenticated connections
+func NewClientAuth(uuid, token string, opts ...Option) *clientInterceptor {
+ opt := &option{client: &clientInterceptor{uuid: uuid, token: token}}
+ for _, o := range opts {
+ o(opt)
+ }
+ if opt.client.log == nil {
+ opt.client.log = log.New()
+ }
+ return opt.client
+}
+
+// WithBearerToken to skip the API Token auth
+func WithBearerToken(bearer string) Option {
+ return func(opt *option) {
+ opt.client.bearer = bearer
+ }
+}
+
// Unary returns a unary client interceptor that debug-logs the invoked
// method and attaches the auth metadata (token, uuid, bearer) to the
// outgoing context before delegating to the real invoker.
func (c *clientInterceptor) Unary() grpc.UnaryClientInterceptor {
	return func(ctx context.Context,
		method string,
		req, reply interface{},
		cc *grpc.ClientConn,
		invoker grpc.UnaryInvoker,
		opts ...grpc.CallOption) error {
		c.log.Debugf("--> client unary interceptor: %s", method)
		return invoker(c.attachToken(ctx), method, req, reply, cc, opts...)
	}
}
+
// Stream returns a stream client interceptor that debug-logs the invoked
// method and attaches the auth metadata (token, uuid, bearer) to the
// outgoing context before creating the stream.
func (c *clientInterceptor) Stream() grpc.StreamClientInterceptor {
	return func(
		ctx context.Context,
		desc *grpc.StreamDesc,
		cc *grpc.ClientConn,
		method string,
		streamer grpc.Streamer,
		opts ...grpc.CallOption,
	) (grpc.ClientStream, error) {
		c.log.Debugf("--> client stream interceptor: %s", method)

		return streamer(c.attachToken(ctx), desc, cc, method, opts...)
	}
}
+
+// GetRequestMetadata satisfy the interface grpc.PerRPCCredentials, by setting the auth token, and client id for the
+// context. see: https://godoc.org/google.golang.org/grpc/credentials#PerRPCCredentials
+func (c *clientInterceptor) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
+ return map[string]string{
+ TokenHeader: c.token,
+ IDHeader: c.uuid,
+ BearerHeader: c.bearer,
+ }, nil
+}
+
// RequireTransportSecurity satisfy the interface grpc.PerRPCCredentials.
// Always false: these credentials may also be sent over insecure connections.
func (c *clientInterceptor) RequireTransportSecurity() bool {
	return false
}
+
+func (c *clientInterceptor) attachToken(ctx context.Context) context.Context {
+ return metadata.AppendToOutgoingContext(ctx,
+ TokenHeader, c.token,
+ IDHeader, c.uuid,
+ BearerHeader, c.bearer,
+ )
+}
+
// option is the mutable configuration threaded through NewClientAuth's
// functional options.
type option struct {
	client *clientInterceptor // interceptor being configured
}
+
// Authenticator Auth-s the initial connection then allows validation at any point of the stream
type Authenticator interface {
	// Auth authenticates the initial connection.
	Auth(ctx context.Context) error
	// ValidateClientToken verifies the client token carried by ctx and
	// returns its claims.
	ValidateClientToken(ctx context.Context) (Claims, error)
}

// Claims contain information about the VerifiedClientToken
type Claims map[string]interface{}

// Option mutates the interceptor configuration inside NewClientAuth.
type Option func(opt *option)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/interceptors.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/interceptors.go
new file mode 100644
index 000000000..1412afe47
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/interceptors/interceptors.go
@@ -0,0 +1,23 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package interceptors
+
+import (
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
// Interceptor provides gRPC stream and unary client interceptors.
type Interceptor interface {
	Stream() grpc.StreamClientInterceptor
	Unary() grpc.UnaryClientInterceptor
}

// ClientInterceptor is an Interceptor that can also act as per-RPC
// credentials (see credentials.PerRPCCredentials).
type ClientInterceptor interface {
	credentials.PerRPCCredentials
	Interceptor
}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.pb.go
new file mode 100644
index 000000000..ca50f0ae9
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.pb.go
@@ -0,0 +1,2922 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: agent.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Different status codes for agent connect response
+type AgentConnectStatus_StatusCode int32
+
+const (
+ // Unknown status of the agent connect request
+ AgentConnectStatus_CONNECT_UNKNOWN AgentConnectStatus_StatusCode = 0
+ // Agent connect request was successful
+ AgentConnectStatus_CONNECT_OK AgentConnectStatus_StatusCode = 1
+ // Agent connect request was rejected
+ AgentConnectStatus_CONNECT_REJECTED_OTHER AgentConnectStatus_StatusCode = 2
+ // Agent connect request was rejected because an agent with the same ID is already registered
+ AgentConnectStatus_CONNECT_REJECTED_DUP_ID AgentConnectStatus_StatusCode = 3
+)
+
+var AgentConnectStatus_StatusCode_name = map[int32]string{
+ 0: "CONNECT_UNKNOWN",
+ 1: "CONNECT_OK",
+ 2: "CONNECT_REJECTED_OTHER",
+ 3: "CONNECT_REJECTED_DUP_ID",
+}
+
+var AgentConnectStatus_StatusCode_value = map[string]int32{
+ "CONNECT_UNKNOWN": 0,
+ "CONNECT_OK": 1,
+ "CONNECT_REJECTED_OTHER": 2,
+ "CONNECT_REJECTED_DUP_ID": 3,
+}
+
+func (x AgentConnectStatus_StatusCode) String() string {
+ return proto.EnumName(AgentConnectStatus_StatusCode_name, int32(x))
+}
+
+func (AgentConnectStatus_StatusCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{1, 0}
+}
+
+// Log level enum
+type AgentLogging_Level int32
+
+const (
+ // info level
+ AgentLogging_INFO AgentLogging_Level = 0
+ // debug level
+ AgentLogging_DEBUG AgentLogging_Level = 1
+ // warn level
+ AgentLogging_WARN AgentLogging_Level = 2
+ // error level
+ AgentLogging_ERROR AgentLogging_Level = 3
+ // fatal level
+ AgentLogging_FATAL AgentLogging_Level = 4
+)
+
+var AgentLogging_Level_name = map[int32]string{
+ 0: "INFO",
+ 1: "DEBUG",
+ 2: "WARN",
+ 3: "ERROR",
+ 4: "FATAL",
+}
+
+var AgentLogging_Level_value = map[string]int32{
+ "INFO": 0,
+ "DEBUG": 1,
+ "WARN": 2,
+ "ERROR": 3,
+ "FATAL": 4,
+}
+
+func (x AgentLogging_Level) String() string {
+ return proto.EnumName(AgentLogging_Level_name, int32(x))
+}
+
+func (AgentLogging_Level) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{6, 0}
+}
+
+// Represents an agent connect request that is sent from the agent to the management server
+type AgentConnectRequest struct {
+ // Provides meta information about the agent
+ Meta *AgentMeta `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta"`
+ // Provides information about the NGINX instances that are present.
+ // This data will be moving to dataplane_software_details in a future release
+ Details []*NginxDetails `protobuf:"bytes,3,rep,name=details,proto3" json:"details"`
+ // Provides information about the host system
+ Host *HostInfo `protobuf:"bytes,4,opt,name=host,proto3" json:"host"`
+ // Provides information about software installed in the system (e.g. App Protect WAF, NGINX, etc.)
+ DataplaneSoftwareDetails []*DataplaneSoftwareDetails `protobuf:"bytes,5,rep,name=dataplane_software_details,json=dataplaneSoftwareDetails,proto3" json:"dataplane_software_details"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentConnectRequest) Reset() { *m = AgentConnectRequest{} }
+func (m *AgentConnectRequest) String() string { return proto.CompactTextString(m) }
+func (*AgentConnectRequest) ProtoMessage() {}
+func (*AgentConnectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{0}
+}
+func (m *AgentConnectRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentConnectRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentConnectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentConnectRequest.Merge(m, src)
+}
+func (m *AgentConnectRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentConnectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentConnectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentConnectRequest proto.InternalMessageInfo
+
+func (m *AgentConnectRequest) GetMeta() *AgentMeta {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+func (m *AgentConnectRequest) GetDetails() []*NginxDetails {
+ if m != nil {
+ return m.Details
+ }
+ return nil
+}
+
+func (m *AgentConnectRequest) GetHost() *HostInfo {
+ if m != nil {
+ return m.Host
+ }
+ return nil
+}
+
+func (m *AgentConnectRequest) GetDataplaneSoftwareDetails() []*DataplaneSoftwareDetails {
+ if m != nil {
+ return m.DataplaneSoftwareDetails
+ }
+ return nil
+}
+
+// Represents an agent connect status
+type AgentConnectStatus struct {
+ // Provides a status of the agent connect response
+ StatusCode AgentConnectStatus_StatusCode `protobuf:"varint,1,opt,name=statusCode,proto3,enum=f5.nginx.agent.sdk.AgentConnectStatus_StatusCode" json:"status_code"`
+ // Provides a user friendly message to describe the response
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
+ // Provides an error message of why the agent connect request was rejected
+ Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentConnectStatus) Reset() { *m = AgentConnectStatus{} }
+func (m *AgentConnectStatus) String() string { return proto.CompactTextString(m) }
+func (*AgentConnectStatus) ProtoMessage() {}
+func (*AgentConnectStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{1}
+}
+func (m *AgentConnectStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentConnectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentConnectStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentConnectStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentConnectStatus.Merge(m, src)
+}
+func (m *AgentConnectStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentConnectStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentConnectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentConnectStatus proto.InternalMessageInfo
+
+func (m *AgentConnectStatus) GetStatusCode() AgentConnectStatus_StatusCode {
+ if m != nil {
+ return m.StatusCode
+ }
+ return AgentConnectStatus_CONNECT_UNKNOWN
+}
+
+func (m *AgentConnectStatus) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *AgentConnectStatus) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
+
+// Represents an agent connect response that is sent from the management server to the agent
+type AgentConnectResponse struct {
+ // Agent configuration
+ AgentConfig *AgentConfig `protobuf:"bytes,1,opt,name=agent_config,json=agentConfig,proto3" json:"agent_config"`
+ // Agent connect request status
+ Status *AgentConnectStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentConnectResponse) Reset() { *m = AgentConnectResponse{} }
+func (m *AgentConnectResponse) String() string { return proto.CompactTextString(m) }
+func (*AgentConnectResponse) ProtoMessage() {}
+func (*AgentConnectResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{2}
+}
+func (m *AgentConnectResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentConnectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentConnectResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentConnectResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentConnectResponse.Merge(m, src)
+}
+func (m *AgentConnectResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentConnectResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentConnectResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentConnectResponse proto.InternalMessageInfo
+
+func (m *AgentConnectResponse) GetAgentConfig() *AgentConfig {
+ if m != nil {
+ return m.AgentConfig
+ }
+ return nil
+}
+
+func (m *AgentConnectResponse) GetStatus() *AgentConnectStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// Represents an agent config request that is sent from the agent to the management server.
+// This is used by the agent to request the agent configuration from the management server.
+type AgentConfigRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentConfigRequest) Reset() { *m = AgentConfigRequest{} }
+func (m *AgentConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*AgentConfigRequest) ProtoMessage() {}
+func (*AgentConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{3}
+}
+func (m *AgentConfigRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentConfigRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentConfigRequest.Merge(m, src)
+}
+func (m *AgentConfigRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentConfigRequest proto.InternalMessageInfo
+
+// Represents an agent's configuration. The message is sent from the management server to the agent.
+type AgentConfig struct {
+ // Provides information about the agent
+ Details *AgentDetails `protobuf:"bytes,1,opt,name=details,proto3" json:"details"`
+ // Provides information about the agent logging.
+ // This is will be implemented in a future release.
+ Loggers *AgentLogging `protobuf:"bytes,2,opt,name=loggers,proto3" json:"loggers"`
+ // Provides meta information about the nginx configurations
+ Configs *ConfigReport `protobuf:"bytes,3,opt,name=configs,proto3" json:"configs"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentConfig) Reset() { *m = AgentConfig{} }
+func (m *AgentConfig) String() string { return proto.CompactTextString(m) }
+func (*AgentConfig) ProtoMessage() {}
+func (*AgentConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{4}
+}
+func (m *AgentConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentConfig.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentConfig.Merge(m, src)
+}
+func (m *AgentConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentConfig proto.InternalMessageInfo
+
+func (m *AgentConfig) GetDetails() *AgentDetails {
+ if m != nil {
+ return m.Details
+ }
+ return nil
+}
+
+func (m *AgentConfig) GetLoggers() *AgentLogging {
+ if m != nil {
+ return m.Loggers
+ }
+ return nil
+}
+
+func (m *AgentConfig) GetConfigs() *ConfigReport {
+ if m != nil {
+ return m.Configs
+ }
+ return nil
+}
+
+// Represents agent details. This message is sent from the management server to the agent.
+type AgentDetails struct {
+ // List of agent feature that are enabled
+ Features []string `protobuf:"bytes,1,rep,name=features,proto3" json:"features"`
+ // List of agent extensions that are enabled
+ Extensions []string `protobuf:"bytes,2,rep,name=extensions,proto3" json:"extensions"`
+ // List of tags
+ Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags"`
+ // Alias name for the agent
+ Alias string `protobuf:"bytes,4,opt,name=alias,proto3" json:"alias"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentDetails) Reset() { *m = AgentDetails{} }
+func (m *AgentDetails) String() string { return proto.CompactTextString(m) }
+func (*AgentDetails) ProtoMessage() {}
+func (*AgentDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{5}
+}
+func (m *AgentDetails) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentDetails.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentDetails.Merge(m, src)
+}
+func (m *AgentDetails) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentDetails proto.InternalMessageInfo
+
+func (m *AgentDetails) GetFeatures() []string {
+ if m != nil {
+ return m.Features
+ }
+ return nil
+}
+
+func (m *AgentDetails) GetExtensions() []string {
+ if m != nil {
+ return m.Extensions
+ }
+ return nil
+}
+
+func (m *AgentDetails) GetTags() []string {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
+func (m *AgentDetails) GetAlias() string {
+ if m != nil {
+ return m.Alias
+ }
+ return ""
+}
+
+// Represents agent logging details
+type AgentLogging struct {
+ // Log level
+ Level AgentLogging_Level `protobuf:"varint,1,opt,name=level,proto3,enum=f5.nginx.agent.sdk.AgentLogging_Level" json:"level"`
+ // Directory where the logs are located
+ Dir string `protobuf:"bytes,2,opt,name=dir,proto3" json:"dir"`
+ // Name of the log file
+ File string `protobuf:"bytes,3,opt,name=file,proto3" json:"file"`
+ // Max size of the log file in MB
+ MaxSize uint32 `protobuf:"varint,4,opt,name=max_size,json=maxSize,proto3" json:"max_size"`
+ // Max number of backups
+ MaxBackups uint32 `protobuf:"varint,5,opt,name=max_backups,json=maxBackups,proto3" json:"max_backups"`
+ // Max age of a log file in days
+ MaxAge uint32 `protobuf:"varint,6,opt,name=max_age,json=maxAge,proto3" json:"max_age"`
+ // Is the log file compressed
+ Compress bool `protobuf:"varint,7,opt,name=compress,proto3" json:"compress"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentLogging) Reset() { *m = AgentLogging{} }
+func (m *AgentLogging) String() string { return proto.CompactTextString(m) }
+func (*AgentLogging) ProtoMessage() {}
+func (*AgentLogging) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{6}
+}
+func (m *AgentLogging) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentLogging) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentLogging.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentLogging) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentLogging.Merge(m, src)
+}
+func (m *AgentLogging) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentLogging) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentLogging.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentLogging proto.InternalMessageInfo
+
+func (m *AgentLogging) GetLevel() AgentLogging_Level {
+ if m != nil {
+ return m.Level
+ }
+ return AgentLogging_INFO
+}
+
+func (m *AgentLogging) GetDir() string {
+ if m != nil {
+ return m.Dir
+ }
+ return ""
+}
+
+func (m *AgentLogging) GetFile() string {
+ if m != nil {
+ return m.File
+ }
+ return ""
+}
+
+func (m *AgentLogging) GetMaxSize() uint32 {
+ if m != nil {
+ return m.MaxSize
+ }
+ return 0
+}
+
+func (m *AgentLogging) GetMaxBackups() uint32 {
+ if m != nil {
+ return m.MaxBackups
+ }
+ return 0
+}
+
+func (m *AgentLogging) GetMaxAge() uint32 {
+ if m != nil {
+ return m.MaxAge
+ }
+ return 0
+}
+
+func (m *AgentLogging) GetCompress() bool {
+ if m != nil {
+ return m.Compress
+ }
+ return false
+}
+
+// Represents agent metadata
+type AgentMeta struct {
+ // Version of the agent
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version"`
+ // User friendly name for the agent
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name"`
+ // List of tags
+ Tag []string `protobuf:"bytes,3,rep,name=tag,proto3" json:"tag"`
+ // Instance group name used to group NGINX instances
+ InstanceGroup string `protobuf:"bytes,5,opt,name=instance_group,json=instanceGroup,proto3" json:"instance_group"`
+ // Last time agent was updated
+ Updated *types.Timestamp `protobuf:"bytes,6,opt,name=updated,proto3" json:"updated"`
+ // ID of the system where the agent is installed
+ SystemUid string `protobuf:"bytes,7,opt,name=system_uid,json=systemUid,proto3" json:"system_uid"`
+ // Provides other agent information
+ AgentDetails *AgentDetails `protobuf:"bytes,8,opt,name=agent_details,json=agentDetails,proto3" json:"agent_details"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentMeta) Reset() { *m = AgentMeta{} }
+func (m *AgentMeta) String() string { return proto.CompactTextString(m) }
+func (*AgentMeta) ProtoMessage() {}
+func (*AgentMeta) Descriptor() ([]byte, []int) {
+ return fileDescriptor_56ede974c0020f77, []int{7}
+}
+func (m *AgentMeta) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentMeta.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentMeta) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentMeta.Merge(m, src)
+}
+func (m *AgentMeta) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentMeta) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentMeta proto.InternalMessageInfo
+
+func (m *AgentMeta) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *AgentMeta) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *AgentMeta) GetTag() []string {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *AgentMeta) GetInstanceGroup() string {
+ if m != nil {
+ return m.InstanceGroup
+ }
+ return ""
+}
+
+func (m *AgentMeta) GetUpdated() *types.Timestamp {
+ if m != nil {
+ return m.Updated
+ }
+ return nil
+}
+
+func (m *AgentMeta) GetSystemUid() string {
+ if m != nil {
+ return m.SystemUid
+ }
+ return ""
+}
+
+func (m *AgentMeta) GetAgentDetails() *AgentDetails {
+ if m != nil {
+ return m.AgentDetails
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.AgentConnectStatus_StatusCode", AgentConnectStatus_StatusCode_name, AgentConnectStatus_StatusCode_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.AgentLogging_Level", AgentLogging_Level_name, AgentLogging_Level_value)
+ proto.RegisterType((*AgentConnectRequest)(nil), "f5.nginx.agent.sdk.AgentConnectRequest")
+ proto.RegisterType((*AgentConnectStatus)(nil), "f5.nginx.agent.sdk.AgentConnectStatus")
+ proto.RegisterType((*AgentConnectResponse)(nil), "f5.nginx.agent.sdk.AgentConnectResponse")
+ proto.RegisterType((*AgentConfigRequest)(nil), "f5.nginx.agent.sdk.AgentConfigRequest")
+ proto.RegisterType((*AgentConfig)(nil), "f5.nginx.agent.sdk.AgentConfig")
+ proto.RegisterType((*AgentDetails)(nil), "f5.nginx.agent.sdk.AgentDetails")
+ proto.RegisterType((*AgentLogging)(nil), "f5.nginx.agent.sdk.AgentLogging")
+ proto.RegisterType((*AgentMeta)(nil), "f5.nginx.agent.sdk.AgentMeta")
+}
+
+func init() { proto.RegisterFile("agent.proto", fileDescriptor_56ede974c0020f77) }
+
+var fileDescriptor_56ede974c0020f77 = []byte{
+ // 1081 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcd, 0x72, 0x1b, 0x45,
+ 0x10, 0xce, 0xea, 0xc7, 0x92, 0x5a, 0xb6, 0xb3, 0x4c, 0x52, 0xa0, 0x88, 0xe0, 0x55, 0xa9, 0x20,
+ 0x88, 0x2a, 0x58, 0x81, 0x52, 0x14, 0x05, 0xe1, 0x22, 0x59, 0x8a, 0x63, 0xc7, 0x48, 0xd4, 0x58,
+ 0xae, 0x50, 0x5c, 0xb6, 0xc6, 0xda, 0xd1, 0x66, 0xb1, 0x76, 0x57, 0xec, 0x8c, 0x8c, 0x9c, 0x47,
+ 0xe0, 0x21, 0xb8, 0x72, 0xe1, 0x01, 0x78, 0x04, 0x8e, 0x3c, 0xc1, 0x16, 0xe5, 0x13, 0xb5, 0x67,
+ 0x2e, 0xdc, 0xa8, 0xf9, 0x59, 0x5b, 0xc6, 0x3f, 0xc9, 0x65, 0xa6, 0xfb, 0xdb, 0xee, 0x9e, 0x9e,
+ 0x6f, 0xa6, 0x7b, 0x16, 0xaa, 0xc4, 0xa3, 0x21, 0xb7, 0xe7, 0x71, 0xc4, 0x23, 0x84, 0xa6, 0x9f,
+ 0xdb, 0xa1, 0xe7, 0x87, 0x4b, 0x5b, 0xa1, 0xcc, 0x3d, 0xae, 0xaf, 0x4f, 0xa2, 0x70, 0xea, 0x7b,
+ 0xca, 0xa2, 0xfe, 0xc0, 0x9d, 0x3b, 0x2c, 0x9a, 0xf2, 0x9f, 0x48, 0x4c, 0x1d, 0x97, 0x72, 0xe2,
+ 0xcf, 0x98, 0xfe, 0x04, 0x5e, 0xe4, 0x45, 0x5a, 0xb6, 0xbc, 0x28, 0xf2, 0x66, 0xb4, 0x2d, 0xb5,
+ 0xa3, 0xc5, 0xb4, 0xcd, 0xfd, 0x80, 0x32, 0x4e, 0x82, 0x79, 0x66, 0xfc, 0x32, 0x62, 0x7a, 0xd5,
+ 0x7a, 0x55, 0x2d, 0x29, 0x95, 0xe6, 0x3f, 0x39, 0xb8, 0xd7, 0x15, 0x8b, 0x6f, 0x47, 0x61, 0x48,
+ 0x27, 0x1c, 0xd3, 0x1f, 0x17, 0x94, 0x71, 0xf4, 0x04, 0x0a, 0x01, 0xe5, 0xa4, 0x96, 0x6b, 0x18,
+ 0xad, 0x6a, 0xe7, 0x3d, 0xfb, 0x6a, 0xa6, 0xb6, 0x74, 0xfb, 0x86, 0x72, 0xd2, 0x2b, 0xa7, 0x89,
+ 0x25, 0xcd, 0xb1, 0x1c, 0xd1, 0x0e, 0x94, 0x74, 0xae, 0xb5, 0x7c, 0x23, 0xdf, 0xaa, 0x76, 0x1a,
+ 0xd7, 0xf9, 0x0f, 0x85, 0xde, 0x57, 0x76, 0xbd, 0x6a, 0x9a, 0x58, 0x99, 0x13, 0xce, 0x04, 0xf4,
+ 0x15, 0x14, 0x44, 0xe2, 0xb5, 0x82, 0xcc, 0xe2, 0xe1, 0x75, 0x51, 0x9e, 0x45, 0x8c, 0xef, 0x86,
+ 0xd3, 0x48, 0x25, 0x21, 0xac, 0xb1, 0x1c, 0xd1, 0xcf, 0x06, 0xd4, 0x5d, 0xc2, 0xc9, 0x7c, 0x46,
+ 0x42, 0x7a, 0x85, 0xc4, 0x5a, 0x51, 0x26, 0xf6, 0xf1, 0x75, 0x21, 0xfb, 0x99, 0xd7, 0x81, 0x76,
+ 0xca, 0x92, 0xdc, 0x4a, 0x13, 0xeb, 0x96, 0x98, 0xb8, 0xe6, 0xde, 0xe0, 0xb9, 0x57, 0x28, 0x1b,
+ 0x66, 0x0e, 0x97, 0x7d, 0x97, 0x86, 0xdc, 0xe7, 0xa7, 0xcd, 0xdf, 0x72, 0x80, 0x56, 0x69, 0x3f,
+ 0xe0, 0x84, 0x2f, 0x18, 0x3a, 0x02, 0x60, 0x52, 0xda, 0x8e, 0x5c, 0x5a, 0x33, 0x1a, 0x46, 0x6b,
+ 0xb3, 0xf3, 0xd9, 0x8d, 0xdc, 0x5f, 0xf2, 0xb5, 0x0f, 0xce, 0x1d, 0x7b, 0x77, 0xd3, 0xc4, 0xaa,
+ 0xaa, 0x40, 0xce, 0x24, 0x72, 0x29, 0x5e, 0x89, 0x8a, 0x3e, 0x80, 0x52, 0x40, 0x19, 0x23, 0x1e,
+ 0x95, 0x87, 0x5b, 0x51, 0xd4, 0x6b, 0x08, 0x67, 0x02, 0xb2, 0xa0, 0x48, 0xe3, 0x38, 0x8a, 0x6b,
+ 0x79, 0x69, 0x54, 0x49, 0x13, 0x4b, 0x01, 0x58, 0x4d, 0xcd, 0x1f, 0x00, 0x2e, 0x96, 0x44, 0xf7,
+ 0xe0, 0xee, 0xf6, 0x68, 0x38, 0x1c, 0x6c, 0x8f, 0x9d, 0xc3, 0xe1, 0xf3, 0xe1, 0xe8, 0xc5, 0xd0,
+ 0xbc, 0x83, 0x36, 0x01, 0x32, 0x70, 0xf4, 0xdc, 0x34, 0x50, 0x1d, 0xde, 0xce, 0x74, 0x3c, 0xd8,
+ 0x1b, 0x6c, 0x8f, 0x07, 0x7d, 0x67, 0x34, 0x7e, 0x36, 0xc0, 0x66, 0x0e, 0xbd, 0x0b, 0xef, 0x5c,
+ 0xf9, 0xd6, 0x3f, 0xfc, 0xd6, 0xd9, 0xed, 0x9b, 0xf9, 0xe6, 0xef, 0x06, 0xdc, 0xbf, 0x7c, 0x4b,
+ 0xd9, 0x3c, 0x0a, 0x19, 0x45, 0x63, 0x58, 0x97, 0xa4, 0x38, 0xaa, 0x6a, 0x24, 0x65, 0xd5, 0x8e,
+ 0x75, 0x1b, 0x65, 0x53, 0xdf, 0xeb, 0x99, 0x69, 0x62, 0x5d, 0x72, 0xc4, 0xaa, 0x2e, 0xd5, 0x67,
+ 0xb4, 0x07, 0x6b, 0x8a, 0x30, 0x7d, 0xfd, 0x1f, 0xbd, 0xd9, 0x11, 0xf4, 0x20, 0x4d, 0x2c, 0xed,
+ 0x89, 0xf5, 0xdc, 0xbc, 0x7f, 0x71, 0xd0, 0x62, 0x1d, 0x55, 0x5e, 0xcd, 0xbf, 0x0d, 0xa8, 0xae,
+ 0xc0, 0xab, 0x15, 0xa3, 0xb6, 0xd0, 0xb8, 0x71, 0xc9, 0xdb, 0x2b, 0x66, 0x07, 0x4a, 0xb3, 0xc8,
+ 0xf3, 0x68, 0x9c, 0xe5, 0x7e, 0x73, 0xa0, 0xfd, 0xc8, 0xf3, 0xfc, 0xd0, 0x53, 0x81, 0xb4, 0x13,
+ 0xce, 0x04, 0x11, 0x48, 0x51, 0xc3, 0xe4, 0x0d, 0xb8, 0x21, 0x50, 0xb6, 0xab, 0x79, 0x14, 0x73,
+ 0x15, 0x48, 0x3b, 0xe1, 0x4c, 0x68, 0xfe, 0x6a, 0xc0, 0xfa, 0x6a, 0xe2, 0xa8, 0x05, 0xe5, 0x29,
+ 0x25, 0x7c, 0x11, 0x53, 0xb1, 0xd9, 0x7c, 0xab, 0xd2, 0x5b, 0x4f, 0x13, 0xeb, 0x1c, 0xc3, 0xe7,
+ 0x12, 0xb2, 0x01, 0xe8, 0x92, 0xd3, 0x90, 0xf9, 0x51, 0x28, 0xf6, 0x23, 0x6c, 0x37, 0xd3, 0xc4,
+ 0x5a, 0x41, 0xf1, 0x8a, 0x8c, 0x1e, 0x42, 0x81, 0x13, 0x4f, 0x35, 0x9d, 0x8a, 0x6a, 0x08, 0x42,
+ 0xc7, 0x72, 0x14, 0x37, 0x9a, 0xcc, 0x7c, 0xc2, 0x64, 0x37, 0xd1, 0x37, 0x5a, 0x02, 0x58, 0x4d,
+ 0xcd, 0x7f, 0x73, 0x3a, 0x53, 0xcd, 0x0c, 0xda, 0x81, 0xe2, 0x8c, 0x9e, 0xd0, 0x99, 0xae, 0xc4,
+ 0x47, 0xaf, 0xa3, 0xd2, 0xde, 0x17, 0xd6, 0x2a, 0xb2, 0x74, 0xc4, 0x6a, 0x42, 0x0f, 0x20, 0xef,
+ 0xfa, 0xb1, 0xae, 0xb7, 0x52, 0x9a, 0x58, 0x42, 0xc5, 0x62, 0x10, 0x39, 0x4f, 0xfd, 0x19, 0xd5,
+ 0x65, 0x26, 0x73, 0x16, 0x3a, 0x96, 0x23, 0xfa, 0x10, 0xca, 0x01, 0x59, 0x3a, 0xcc, 0x7f, 0x45,
+ 0x65, 0xda, 0x1b, 0x8a, 0xab, 0x0c, 0xc3, 0xa5, 0x80, 0x2c, 0x0f, 0xfc, 0x57, 0x14, 0x7d, 0x0a,
+ 0x55, 0x01, 0x1e, 0x91, 0xc9, 0xf1, 0x62, 0x2e, 0xba, 0x9b, 0xb0, 0x95, 0x7d, 0x60, 0x05, 0xc6,
+ 0x10, 0x90, 0x65, 0x4f, 0xc9, 0xe8, 0x7d, 0x10, 0xce, 0x8e, 0xe8, 0x03, 0x6b, 0xd2, 0x5a, 0xf5,
+ 0x01, 0x05, 0xe1, 0xb5, 0x80, 0x2c, 0xbb, 0x1e, 0x15, 0x87, 0x35, 0x89, 0x82, 0x79, 0x4c, 0x19,
+ 0xab, 0x95, 0x1a, 0x46, 0xab, 0xac, 0x12, 0xc8, 0x30, 0x7c, 0x2e, 0x35, 0xbf, 0x86, 0xa2, 0xdc,
+ 0x3e, 0x2a, 0x43, 0x61, 0x77, 0xf8, 0x74, 0x64, 0xde, 0x41, 0x15, 0x28, 0xf6, 0x07, 0xbd, 0xc3,
+ 0x1d, 0xd3, 0x10, 0xe0, 0x8b, 0x2e, 0x1e, 0x9a, 0x39, 0x01, 0x0e, 0x30, 0x1e, 0x61, 0x33, 0x2f,
+ 0xc4, 0xa7, 0xdd, 0x71, 0x77, 0xdf, 0x2c, 0x34, 0x7f, 0xc9, 0x43, 0xe5, 0xfc, 0x41, 0x11, 0x3d,
+ 0xea, 0x84, 0xc6, 0xe2, 0x50, 0x25, 0xf5, 0xba, 0x47, 0x69, 0x08, 0x67, 0x02, 0x7a, 0x0c, 0xeb,
+ 0xae, 0xcf, 0xe6, 0x33, 0x72, 0xea, 0x84, 0x24, 0xc8, 0xfa, 0x99, 0x2c, 0xee, 0x55, 0x1c, 0x57,
+ 0xb5, 0x36, 0x24, 0x01, 0x15, 0x67, 0xc1, 0x89, 0xa7, 0xef, 0x88, 0x3c, 0x0b, 0x4e, 0x3c, 0x2c,
+ 0x06, 0xf4, 0x25, 0x6c, 0xfa, 0x21, 0xe3, 0x24, 0x9c, 0x50, 0xc7, 0x8b, 0xa3, 0xc5, 0x5c, 0xf2,
+ 0x58, 0xe9, 0xa1, 0x34, 0xb1, 0xfe, 0xf7, 0x05, 0x6f, 0x64, 0xfa, 0x8e, 0x50, 0x51, 0x17, 0x4a,
+ 0x8b, 0xb9, 0x4b, 0x38, 0x75, 0x25, 0x9b, 0xd5, 0x4e, 0xdd, 0x56, 0x6f, 0xb2, 0x9d, 0xbd, 0xc9,
+ 0xf6, 0x38, 0x7b, 0x93, 0xd5, 0x6e, 0xb4, 0x39, 0xce, 0x04, 0xf4, 0x09, 0x00, 0x3b, 0x65, 0x9c,
+ 0x06, 0xce, 0xc2, 0x77, 0x25, 0xd9, 0xfa, 0xb6, 0x5f, 0xa0, 0xb8, 0xa2, 0xe4, 0x43, 0xdf, 0x45,
+ 0xdf, 0xc1, 0x86, 0xea, 0x60, 0x59, 0xe3, 0x28, 0xbf, 0x61, 0xe3, 0x78, 0x2b, 0x4d, 0xac, 0xcb,
+ 0xae, 0x58, 0xf5, 0xc2, 0x8b, 0xc7, 0xaa, 0x60, 0x16, 0x2f, 0x36, 0x28, 0x39, 0xec, 0x7d, 0xf1,
+ 0xc7, 0xd9, 0x96, 0xf1, 0xe7, 0xd9, 0x96, 0xf1, 0xd7, 0xd9, 0x96, 0xf1, 0xfd, 0x47, 0x9e, 0xcf,
+ 0x5f, 0x2e, 0x8e, 0xec, 0x49, 0x14, 0xb4, 0xe5, 0x62, 0x6d, 0x19, 0xa1, 0xcd, 0xdc, 0xe3, 0xf6,
+ 0x49, 0x47, 0xfd, 0x87, 0x3c, 0x51, 0x3b, 0x5f, 0x93, 0xd3, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0x6e, 0xed, 0x83, 0xfa, 0x08, 0x00, 0x00,
+}
+
+func (m *AgentConnectRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentConnectRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentConnectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for iNdEx := len(m.DataplaneSoftwareDetails) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataplaneSoftwareDetails[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Host != nil {
+ {
+ size, err := m.Host.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Details) > 0 {
+ for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Details[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentConnectStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentConnectStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentConnectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Error) > 0 {
+ i -= len(m.Error)
+ copy(dAtA[i:], m.Error)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Error)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.StatusCode != 0 {
+ i = encodeVarintAgent(dAtA, i, uint64(m.StatusCode))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentConnectResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentConnectResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentConnectResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Status != nil {
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.AgentConfig != nil {
+ {
+ size, err := m.AgentConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentConfigRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentConfigRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Configs != nil {
+ {
+ size, err := m.Configs.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Loggers != nil {
+ {
+ size, err := m.Loggers.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Details != nil {
+ {
+ size, err := m.Details.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentDetails) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Alias) > 0 {
+ i -= len(m.Alias)
+ copy(dAtA[i:], m.Alias)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Alias)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Tags) > 0 {
+ for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Tags[iNdEx])
+ copy(dAtA[i:], m.Tags[iNdEx])
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Tags[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Extensions) > 0 {
+ for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Extensions[iNdEx])
+ copy(dAtA[i:], m.Extensions[iNdEx])
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Extensions[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Features) > 0 {
+ for iNdEx := len(m.Features) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Features[iNdEx])
+ copy(dAtA[i:], m.Features[iNdEx])
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Features[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentLogging) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentLogging) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentLogging) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Compress {
+ i--
+ if m.Compress {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.MaxAge != 0 {
+ i = encodeVarintAgent(dAtA, i, uint64(m.MaxAge))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.MaxBackups != 0 {
+ i = encodeVarintAgent(dAtA, i, uint64(m.MaxBackups))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.MaxSize != 0 {
+ i = encodeVarintAgent(dAtA, i, uint64(m.MaxSize))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.File) > 0 {
+ i -= len(m.File)
+ copy(dAtA[i:], m.File)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.File)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Dir) > 0 {
+ i -= len(m.Dir)
+ copy(dAtA[i:], m.Dir)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Dir)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Level != 0 {
+ i = encodeVarintAgent(dAtA, i, uint64(m.Level))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentMeta) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentMeta) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.AgentDetails != nil {
+ {
+ size, err := m.AgentDetails.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.SystemUid) > 0 {
+ i -= len(m.SystemUid)
+ copy(dAtA[i:], m.SystemUid)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.SystemUid)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Updated != nil {
+ {
+ size, err := m.Updated.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAgent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.InstanceGroup) > 0 {
+ i -= len(m.InstanceGroup)
+ copy(dAtA[i:], m.InstanceGroup)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.InstanceGroup)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Tag) > 0 {
+ for iNdEx := len(m.Tag) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Tag[iNdEx])
+ copy(dAtA[i:], m.Tag[iNdEx])
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Tag[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.DisplayName) > 0 {
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintAgent(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintAgent(dAtA []byte, offset int, v uint64) int {
+ offset -= sovAgent(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AgentConnectRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if len(m.Details) > 0 {
+ for _, e := range m.Details {
+ l = e.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ if m.Host != nil {
+ l = m.Host.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for _, e := range m.DataplaneSoftwareDetails {
+ l = e.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentConnectStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.StatusCode != 0 {
+ n += 1 + sovAgent(uint64(m.StatusCode))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentConnectResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AgentConfig != nil {
+ l = m.AgentConfig.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.Status != nil {
+ l = m.Status.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentConfigRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Details != nil {
+ l = m.Details.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.Loggers != nil {
+ l = m.Loggers.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.Configs != nil {
+ l = m.Configs.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Features) > 0 {
+ for _, s := range m.Features {
+ l = len(s)
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ if len(m.Extensions) > 0 {
+ for _, s := range m.Extensions {
+ l = len(s)
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ if len(m.Tags) > 0 {
+ for _, s := range m.Tags {
+ l = len(s)
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ l = len(m.Alias)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentLogging) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Level != 0 {
+ n += 1 + sovAgent(uint64(m.Level))
+ }
+ l = len(m.Dir)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.File)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.MaxSize != 0 {
+ n += 1 + sovAgent(uint64(m.MaxSize))
+ }
+ if m.MaxBackups != 0 {
+ n += 1 + sovAgent(uint64(m.MaxBackups))
+ }
+ if m.MaxAge != 0 {
+ n += 1 + sovAgent(uint64(m.MaxAge))
+ }
+ if m.Compress {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentMeta) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.DisplayName)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if len(m.Tag) > 0 {
+ for _, s := range m.Tag {
+ l = len(s)
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ }
+ l = len(m.InstanceGroup)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.Updated != nil {
+ l = m.Updated.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ l = len(m.SystemUid)
+ if l > 0 {
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.AgentDetails != nil {
+ l = m.AgentDetails.Size()
+ n += 1 + l + sovAgent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovAgent(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAgent(x uint64) (n int) {
+ return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *AgentConnectRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentConnectRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Meta == nil {
+ m.Meta = &AgentMeta{}
+ }
+ if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Details = append(m.Details, &NginxDetails{})
+ if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Host == nil {
+ m.Host = &HostInfo{}
+ }
+ if err := m.Host.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataplaneSoftwareDetails", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DataplaneSoftwareDetails = append(m.DataplaneSoftwareDetails, &DataplaneSoftwareDetails{})
+ if err := m.DataplaneSoftwareDetails[len(m.DataplaneSoftwareDetails)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentConnectStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentConnectStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentConnectStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType)
+ }
+ m.StatusCode = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StatusCode |= AgentConnectStatus_StatusCode(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentConnectResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentConnectResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AgentConfig == nil {
+ m.AgentConfig = &AgentConfig{}
+ }
+ if err := m.AgentConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Status == nil {
+ m.Status = &AgentConnectStatus{}
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentConfigRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentConfigRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Details == nil {
+ m.Details = &AgentDetails{}
+ }
+ if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Loggers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Loggers == nil {
+ m.Loggers = &AgentLogging{}
+ }
+ if err := m.Loggers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Configs == nil {
+ m.Configs = &ConfigReport{}
+ }
+ if err := m.Configs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentDetails) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Features = append(m.Features, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extensions = append(m.Extensions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alias = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentLogging) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentLogging: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentLogging: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+ }
+ m.Level = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Level |= AgentLogging_Level(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Dir = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field File", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.File = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSize", wireType)
+ }
+ m.MaxSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxSize |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxBackups", wireType)
+ }
+ m.MaxBackups = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxBackups |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType)
+ }
+ m.MaxAge = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxAge |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compress", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Compress = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AgentMeta) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AgentMeta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AgentMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tag = append(m.Tag, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InstanceGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.InstanceGroup = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Updated", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Updated == nil {
+ m.Updated = &types.Timestamp{}
+ }
+ if err := m.Updated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemUid", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SystemUid = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentDetails", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAgent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AgentDetails == nil {
+ m.AgentDetails = &AgentDetails{}
+ }
+ if err := m.AgentDetails.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipAgent(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthAgent
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupAgent
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthAgent
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupAgent = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.proto
new file mode 100644
index 000000000..ef7d6b720
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/agent.proto
@@ -0,0 +1,135 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "config.proto";
+import "dp_software_details.proto";
+import "gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "host.proto";
+import "nginx.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents an agent connect request that is sent from the agent to the management server
+message AgentConnectRequest {
+ // DEPRECATED
+ reserved 1;
+ reserved "identity";
+ // Provides meta information about the agent
+ AgentMeta meta = 2 [(gogoproto.jsontag) = "meta"];
+ // Provides information about the NGINX instances that are present.
+ // This data will be moving to dataplane_software_details in a future release
+ repeated NginxDetails details = 3 [(gogoproto.jsontag) = "details"];
+ // Provides information about the host system
+ HostInfo host = 4 [(gogoproto.jsontag) = "host"];
+ // Provides information about software installed in the system (e.g. App Protect WAF, NGINX, etc.)
+ repeated DataplaneSoftwareDetails dataplane_software_details = 5 [(gogoproto.jsontag) = "dataplane_software_details"];
+}
+
+// Represents an agent connect status
+message AgentConnectStatus {
+ // Different status codes for agent connect response
+ enum StatusCode {
+ // Unknown status of the agent connect request
+ CONNECT_UNKNOWN = 0;
+ // Agent connect request was successful
+ CONNECT_OK = 1;
+ // Agent connect request was rejected
+ CONNECT_REJECTED_OTHER = 2;
+ // Agent connect request was rejected because an agent with the same ID is already registered
+ CONNECT_REJECTED_DUP_ID = 3;
+ }
+ // Provides a status of the agent connect response
+ StatusCode statusCode = 1 [(gogoproto.jsontag) = "status_code"];
+ // Provides a user friendly message to describe the response
+ string message = 2 [(gogoproto.jsontag) = "message"];
+ // Provides an error message of why the agent connect request was rejected
+ string error = 3 [(gogoproto.jsontag) = "error"];
+}
+
+// Represents an agent connect response that is sent from the management server to the agent
+message AgentConnectResponse {
+ // Agent configuration
+ AgentConfig agent_config = 1 [(gogoproto.jsontag) = "agent_config"];
+ // Agent connect request status
+ AgentConnectStatus status = 2 [(gogoproto.jsontag) = "status"];
+}
+
+// Represents an agent config request that is sent from the agent to the management server.
+// This is used by the agent to request the agent configuration from the management server.
+message AgentConfigRequest {}
+
+// Represents an agent's configuration. The message is sent from the management server to the agent.
+message AgentConfig {
+ // Provides information about the agent
+ AgentDetails details = 1 [(gogoproto.jsontag) = "details"];
+ // Provides information about the agent logging.
+ // This will be implemented in a future release.
+ AgentLogging loggers = 2 [(gogoproto.jsontag) = "loggers"];
+ // Provides meta information about the nginx configurations
+ ConfigReport configs = 3 [(gogoproto.jsontag) = "configs"];
+}
+
+// Represents agent details. This message is sent from the management server to the agent.
+message AgentDetails {
+ // List of agent feature that are enabled
+ repeated string features = 1 [(gogoproto.jsontag) = "features"];
+ // List of agent extensions that are enabled
+ repeated string extensions = 2 [(gogoproto.jsontag) = "extensions"];
+ // List of tags
+ repeated string tags = 3 [(gogoproto.jsontag) = "tags"];
+ // Alias name for the agent
+ string alias = 4 [(gogoproto.jsontag) = "alias"];
+}
+
+// Represents agent logging details
+message AgentLogging {
+ // Log level enum
+ enum Level {
+ // info level
+ INFO = 0;
+ // debug level
+ DEBUG = 1;
+ // warn level
+ WARN = 2;
+ // error level
+ ERROR = 3;
+ // fatal level
+ FATAL = 4;
+ }
+ // Log level
+ Level level = 1 [(gogoproto.jsontag) = "level"];
+ // Directory where the logs are located
+ string dir = 2 [(gogoproto.jsontag) = "dir"];
+ // Name of the log file
+ string file = 3 [(gogoproto.jsontag) = "file"];
+ // Max size of the log file in MB
+ uint32 max_size = 4 [(gogoproto.jsontag) = "max_size"];
+ // Max number of backups
+ uint32 max_backups = 5 [(gogoproto.jsontag) = "max_backups"];
+ // Max age of a log file in days
+ uint32 max_age = 6 [(gogoproto.jsontag) = "max_age"];
+ // Is the log file compressed
+ bool compress = 7 [(gogoproto.jsontag) = "compress"];
+}
+
+// Represents agent metadata
+message AgentMeta {
+ // Version of the agent
+ string version = 1 [(gogoproto.jsontag) = "version"];
+ // User friendly name for the agent
+ string display_name = 2 [(gogoproto.jsontag) = "display_name"];
+ // List of tags
+ repeated string tag = 3 [(gogoproto.jsontag) = "tag"];
+ // DEPRECATED
+ reserved 4;
+ reserved "instance_name";
+ // Instance group name used to group NGINX instances
+ string instance_group = 5 [(gogoproto.jsontag) = "instance_group"];
+ // Last time agent was updated
+ google.protobuf.Timestamp updated = 6 [(gogoproto.jsontag) = "updated"];
+ // ID of the system where the agent is installed
+ string system_uid = 7 [(gogoproto.jsontag) = "system_uid"];
+ // Provides other agent information
+ AgentDetails agent_details = 8 [(gogoproto.jsontag) = "agent_details"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.pb.go
new file mode 100644
index 000000000..e53ccbf5e
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.pb.go
@@ -0,0 +1,5256 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: command.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ events "github.com/nginx/agent/sdk/v2/proto/events"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Command type enum
+type Command_CommandType int32
+
+const (
+ // All commands default to normal
+ Command_NORMAL Command_CommandType = 0
+ // The download type is used when sending NginxConfig from the management server to the agent.
+ // It is used to instruct the agent to download the NGINX config from the management server.
+ Command_DOWNLOAD Command_CommandType = 1
+ // The upload type is used when sending NginxConfig from the agent to the management server.
+ // It is used to instruct the agent to upload the NGINX config from the agent.
+ // This will be implemented in a future release.
+ Command_UPLOAD Command_CommandType = 2
+)
+
+var Command_CommandType_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "DOWNLOAD",
+ 2: "UPLOAD",
+}
+
+var Command_CommandType_value = map[string]int32{
+ "NORMAL": 0,
+ "DOWNLOAD": 1,
+ "UPLOAD": 2,
+}
+
+func (x Command_CommandType) String() string {
+ return proto.EnumName(Command_CommandType_name, int32(x))
+}
+
+func (Command_CommandType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{0, 0}
+}
+
+// Command status enum
+type CommandStatusResponse_CommandStatus int32
+
+const (
+ // Unknown status of command
+ CommandStatusResponse_CMD_UNKNOWN CommandStatusResponse_CommandStatus = 0
+ // Command was successful
+ CommandStatusResponse_CMD_OK CommandStatusResponse_CommandStatus = 1
+ // Command failed
+ CommandStatusResponse_CMD_ERROR CommandStatusResponse_CommandStatus = 2
+)
+
+var CommandStatusResponse_CommandStatus_name = map[int32]string{
+ 0: "CMD_UNKNOWN",
+ 1: "CMD_OK",
+ 2: "CMD_ERROR",
+}
+
+var CommandStatusResponse_CommandStatus_value = map[string]int32{
+ "CMD_UNKNOWN": 0,
+ "CMD_OK": 1,
+ "CMD_ERROR": 2,
+}
+
+func (x CommandStatusResponse_CommandStatus) String() string {
+ return proto.EnumName(CommandStatusResponse_CommandStatus_name, int32(x))
+}
+
+func (CommandStatusResponse_CommandStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{1, 0}
+}
+
+// Command error code enum
+type CommandStatusResponse_CommandErrorCode int32
+
+const (
+ // No Error (This is the default value)
+ CommandStatusResponse_ERR_OK CommandStatusResponse_CommandErrorCode = 0
+ // Unknown error
+ CommandStatusResponse_ERR_UNKNOWN CommandStatusResponse_CommandErrorCode = 1
+)
+
+var CommandStatusResponse_CommandErrorCode_name = map[int32]string{
+ 0: "ERR_OK",
+ 1: "ERR_UNKNOWN",
+}
+
+var CommandStatusResponse_CommandErrorCode_value = map[string]int32{
+ "ERR_OK": 0,
+ "ERR_UNKNOWN": 1,
+}
+
+func (x CommandStatusResponse_CommandErrorCode) String() string {
+ return proto.EnumName(CommandStatusResponse_CommandErrorCode_name, int32(x))
+}
+
+func (CommandStatusResponse_CommandErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{1, 1}
+}
+
+// NGINX configuration status enum
+type NginxConfigStatus_Status int32
+
+const (
+ // The configuration is still in the process of being applied.
+ NginxConfigStatus_PENDING NginxConfigStatus_Status = 0
+ // The configuration has been successfully applied.
+ NginxConfigStatus_OK NginxConfigStatus_Status = 1
+ // The configuration has failed to be applied
+ NginxConfigStatus_ERROR NginxConfigStatus_Status = 2
+)
+
+var NginxConfigStatus_Status_name = map[int32]string{
+ 0: "PENDING",
+ 1: "OK",
+ 2: "ERROR",
+}
+
+var NginxConfigStatus_Status_value = map[string]int32{
+ "PENDING": 0,
+ "OK": 1,
+ "ERROR": 2,
+}
+
+func (x NginxConfigStatus_Status) String() string {
+ return proto.EnumName(NginxConfigStatus_Status_name, int32(x))
+}
+
+func (NginxConfigStatus_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{4, 0}
+}
+
+// Transfer status enum
+type UploadStatus_TransferStatus int32
+
+const (
+ // Unknown status
+ UploadStatus_UNKNOWN UploadStatus_TransferStatus = 0
+ // Upload was successful
+ UploadStatus_OK UploadStatus_TransferStatus = 1
+ // Upload failed
+ UploadStatus_FAILED UploadStatus_TransferStatus = 2
+)
+
+var UploadStatus_TransferStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "OK",
+ 2: "FAILED",
+}
+
+var UploadStatus_TransferStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "OK": 1,
+ "FAILED": 2,
+}
+
+func (x UploadStatus_TransferStatus) String() string {
+ return proto.EnumName(UploadStatus_TransferStatus_name, int32(x))
+}
+
+func (UploadStatus_TransferStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{9, 0}
+}
+
+// Represents a command message, which is used for communication between the management server and the agent.
+type Command struct {
+ // Provides metadata information associated with the command
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // Used to determine the type of command
+ Type Command_CommandType `protobuf:"varint,2,opt,name=type,proto3,enum=f5.nginx.agent.sdk.Command_CommandType" json:"type"`
+ // Types that are valid to be assigned to Data:
+ // *Command_CmdStatus
+ // *Command_NginxConfig
+ // *Command_NginxConfigResponse
+ // *Command_AgentConnectRequest
+ // *Command_AgentConnectResponse
+ // *Command_AgentConfigRequest
+ // *Command_AgentConfig
+ // *Command_DataplaneStatus
+ // *Command_EventReport
+ // *Command_DataplaneSoftwareDetails
+ // *Command_DataplaneUpdate
+ Data isCommand_Data `protobuf_oneof:"data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Command) Reset() { *m = Command{} }
+func (m *Command) String() string { return proto.CompactTextString(m) }
+func (*Command) ProtoMessage() {}
+func (*Command) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{0}
+}
+func (m *Command) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Command) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Command.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Command) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Command.Merge(m, src)
+}
+func (m *Command) XXX_Size() int {
+ return m.Size()
+}
+func (m *Command) XXX_DiscardUnknown() {
+ xxx_messageInfo_Command.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Command proto.InternalMessageInfo
+
+type isCommand_Data interface {
+ isCommand_Data()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Command_CmdStatus struct {
+ CmdStatus *CommandStatusResponse `protobuf:"bytes,3,opt,name=cmd_status,json=cmdStatus,proto3,oneof" json:"cmd_status"`
+}
+type Command_NginxConfig struct {
+ NginxConfig *NginxConfig `protobuf:"bytes,4,opt,name=nginx_config,json=nginxConfig,proto3,oneof" json:"nginx_config"`
+}
+type Command_NginxConfigResponse struct {
+ NginxConfigResponse *NginxConfigResponse `protobuf:"bytes,5,opt,name=nginx_config_response,json=nginxConfigResponse,proto3,oneof" json:"nginx_config_response"`
+}
+type Command_AgentConnectRequest struct {
+ AgentConnectRequest *AgentConnectRequest `protobuf:"bytes,6,opt,name=agent_connect_request,json=agentConnectRequest,proto3,oneof" json:"agent_connect_request"`
+}
+type Command_AgentConnectResponse struct {
+ AgentConnectResponse *AgentConnectResponse `protobuf:"bytes,7,opt,name=agent_connect_response,json=agentConnectResponse,proto3,oneof" json:"agent_connect_response"`
+}
+type Command_AgentConfigRequest struct {
+ AgentConfigRequest *AgentConfigRequest `protobuf:"bytes,8,opt,name=agent_config_request,json=agentConfigRequest,proto3,oneof" json:"agent_config_request"`
+}
+type Command_AgentConfig struct {
+ AgentConfig *AgentConfig `protobuf:"bytes,9,opt,name=agent_config,json=agentConfig,proto3,oneof" json:"agent_config"`
+}
+type Command_DataplaneStatus struct {
+ DataplaneStatus *DataplaneStatus `protobuf:"bytes,11,opt,name=dataplane_status,json=dataplaneStatus,proto3,oneof" json:"dataplane_status"`
+}
+type Command_EventReport struct {
+ EventReport *events.EventReport `protobuf:"bytes,12,opt,name=event_report,json=eventReport,proto3,oneof" json:"event_report"`
+}
+type Command_DataplaneSoftwareDetails struct {
+ DataplaneSoftwareDetails *DataplaneSoftwareDetails `protobuf:"bytes,13,opt,name=dataplane_software_details,json=dataplaneSoftwareDetails,proto3,oneof" json:"dataplane_software_details"`
+}
+type Command_DataplaneUpdate struct {
+ DataplaneUpdate *DataplaneUpdate `protobuf:"bytes,14,opt,name=dataplane_update,json=dataplaneUpdate,proto3,oneof" json:"dataplane_update"`
+}
+
+func (*Command_CmdStatus) isCommand_Data() {}
+func (*Command_NginxConfig) isCommand_Data() {}
+func (*Command_NginxConfigResponse) isCommand_Data() {}
+func (*Command_AgentConnectRequest) isCommand_Data() {}
+func (*Command_AgentConnectResponse) isCommand_Data() {}
+func (*Command_AgentConfigRequest) isCommand_Data() {}
+func (*Command_AgentConfig) isCommand_Data() {}
+func (*Command_DataplaneStatus) isCommand_Data() {}
+func (*Command_EventReport) isCommand_Data() {}
+func (*Command_DataplaneSoftwareDetails) isCommand_Data() {}
+func (*Command_DataplaneUpdate) isCommand_Data() {}
+
+func (m *Command) GetData() isCommand_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *Command) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+func (m *Command) GetType() Command_CommandType {
+ if m != nil {
+ return m.Type
+ }
+ return Command_NORMAL
+}
+
+func (m *Command) GetCmdStatus() *CommandStatusResponse {
+ if x, ok := m.GetData().(*Command_CmdStatus); ok {
+ return x.CmdStatus
+ }
+ return nil
+}
+
+func (m *Command) GetNginxConfig() *NginxConfig {
+ if x, ok := m.GetData().(*Command_NginxConfig); ok {
+ return x.NginxConfig
+ }
+ return nil
+}
+
+func (m *Command) GetNginxConfigResponse() *NginxConfigResponse {
+ if x, ok := m.GetData().(*Command_NginxConfigResponse); ok {
+ return x.NginxConfigResponse
+ }
+ return nil
+}
+
+func (m *Command) GetAgentConnectRequest() *AgentConnectRequest {
+ if x, ok := m.GetData().(*Command_AgentConnectRequest); ok {
+ return x.AgentConnectRequest
+ }
+ return nil
+}
+
+func (m *Command) GetAgentConnectResponse() *AgentConnectResponse {
+ if x, ok := m.GetData().(*Command_AgentConnectResponse); ok {
+ return x.AgentConnectResponse
+ }
+ return nil
+}
+
+func (m *Command) GetAgentConfigRequest() *AgentConfigRequest {
+ if x, ok := m.GetData().(*Command_AgentConfigRequest); ok {
+ return x.AgentConfigRequest
+ }
+ return nil
+}
+
+func (m *Command) GetAgentConfig() *AgentConfig {
+ if x, ok := m.GetData().(*Command_AgentConfig); ok {
+ return x.AgentConfig
+ }
+ return nil
+}
+
+func (m *Command) GetDataplaneStatus() *DataplaneStatus {
+ if x, ok := m.GetData().(*Command_DataplaneStatus); ok {
+ return x.DataplaneStatus
+ }
+ return nil
+}
+
+func (m *Command) GetEventReport() *events.EventReport {
+ if x, ok := m.GetData().(*Command_EventReport); ok {
+ return x.EventReport
+ }
+ return nil
+}
+
+func (m *Command) GetDataplaneSoftwareDetails() *DataplaneSoftwareDetails {
+ if x, ok := m.GetData().(*Command_DataplaneSoftwareDetails); ok {
+ return x.DataplaneSoftwareDetails
+ }
+ return nil
+}
+
+func (m *Command) GetDataplaneUpdate() *DataplaneUpdate {
+ if x, ok := m.GetData().(*Command_DataplaneUpdate); ok {
+ return x.DataplaneUpdate
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Command) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Command_CmdStatus)(nil),
+ (*Command_NginxConfig)(nil),
+ (*Command_NginxConfigResponse)(nil),
+ (*Command_AgentConnectRequest)(nil),
+ (*Command_AgentConnectResponse)(nil),
+ (*Command_AgentConfigRequest)(nil),
+ (*Command_AgentConfig)(nil),
+ (*Command_DataplaneStatus)(nil),
+ (*Command_EventReport)(nil),
+ (*Command_DataplaneSoftwareDetails)(nil),
+ (*Command_DataplaneUpdate)(nil),
+ }
+}
+
+// Represents a command status response
+type CommandStatusResponse struct {
+ // Command status
+ Status CommandStatusResponse_CommandStatus `protobuf:"varint,1,opt,name=status,proto3,enum=f5.nginx.agent.sdk.CommandStatusResponse_CommandStatus" json:"status"`
+ // Error code
+ ErrorCode CommandStatusResponse_CommandErrorCode `protobuf:"varint,2,opt,name=error_code,json=errorCode,proto3,enum=f5.nginx.agent.sdk.CommandStatusResponse_CommandErrorCode" json:"error_code"`
+ // Provides a user friendly message to describe the response
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message"`
+ // Provides an error message of why the command failed
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CommandStatusResponse) Reset() { *m = CommandStatusResponse{} }
+func (m *CommandStatusResponse) String() string { return proto.CompactTextString(m) }
+func (*CommandStatusResponse) ProtoMessage() {}
+func (*CommandStatusResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{1}
+}
+func (m *CommandStatusResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CommandStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CommandStatusResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CommandStatusResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommandStatusResponse.Merge(m, src)
+}
+func (m *CommandStatusResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *CommandStatusResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommandStatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommandStatusResponse proto.InternalMessageInfo
+
+func (m *CommandStatusResponse) GetStatus() CommandStatusResponse_CommandStatus {
+ if m != nil {
+ return m.Status
+ }
+ return CommandStatusResponse_CMD_UNKNOWN
+}
+
+func (m *CommandStatusResponse) GetErrorCode() CommandStatusResponse_CommandErrorCode {
+ if m != nil {
+ return m.ErrorCode
+ }
+ return CommandStatusResponse_ERR_OK
+}
+
+func (m *CommandStatusResponse) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *CommandStatusResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
+
+// Represents a dataplane status, which is used by the agent to periodically report the status of NGINX, agent activities and other dataplane software activities.
+type DataplaneStatus struct {
+ // System ID
+ SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id"`
+ // List of NGINX details. This field will be moving to DataplaneSoftwareDetails in a future release.
+ Details []*NginxDetails `protobuf:"bytes,2,rep,name=details,proto3" json:"details"`
+ // Host information
+ Host *HostInfo `protobuf:"bytes,3,opt,name=host,proto3" json:"host"`
+ // List of NGINX health information. This field will be moving to DataplaneSoftwareHealth in a future release.
+ Healths []*NginxHealth `protobuf:"bytes,5,rep,name=healths,proto3" json:"healths"`
+ // List of software details. This includes details about NGINX and any other software installed in the system that the agent is interested in.
+ DataplaneSoftwareDetails []*DataplaneSoftwareDetails `protobuf:"bytes,6,rep,name=dataplane_software_details,json=dataplaneSoftwareDetails,proto3" json:"dataplane_software_details"`
+ // List of software health statuses. This includes the health of NGINX and any other software installed in the system that the agent is interested in.
+ DataplaneSoftwareHealths []*DataplaneSoftwareHealth `protobuf:"bytes,7,rep,name=dataplane_software_healths,json=dataplaneSoftwareHealths,proto3" json:"dataplane_software_healths"`
+ // List of activity statuses. Reports on the status of activities that the agent is currently executing.
+ AgentActivityStatus []*AgentActivityStatus `protobuf:"bytes,8,rep,name=agent_activity_status,json=agentActivityStatus,proto3" json:"agent_activity_status"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DataplaneStatus) Reset() { *m = DataplaneStatus{} }
+func (m *DataplaneStatus) String() string { return proto.CompactTextString(m) }
+func (*DataplaneStatus) ProtoMessage() {}
+func (*DataplaneStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{2}
+}
+func (m *DataplaneStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DataplaneStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DataplaneStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DataplaneStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataplaneStatus.Merge(m, src)
+}
+func (m *DataplaneStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataplaneStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataplaneStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataplaneStatus proto.InternalMessageInfo
+
+func (m *DataplaneStatus) GetSystemId() string {
+ if m != nil {
+ return m.SystemId
+ }
+ return ""
+}
+
+func (m *DataplaneStatus) GetDetails() []*NginxDetails {
+ if m != nil {
+ return m.Details
+ }
+ return nil
+}
+
+func (m *DataplaneStatus) GetHost() *HostInfo {
+ if m != nil {
+ return m.Host
+ }
+ return nil
+}
+
+func (m *DataplaneStatus) GetHealths() []*NginxHealth {
+ if m != nil {
+ return m.Healths
+ }
+ return nil
+}
+
+func (m *DataplaneStatus) GetDataplaneSoftwareDetails() []*DataplaneSoftwareDetails {
+ if m != nil {
+ return m.DataplaneSoftwareDetails
+ }
+ return nil
+}
+
+func (m *DataplaneStatus) GetDataplaneSoftwareHealths() []*DataplaneSoftwareHealth {
+ if m != nil {
+ return m.DataplaneSoftwareHealths
+ }
+ return nil
+}
+
+func (m *DataplaneStatus) GetAgentActivityStatus() []*AgentActivityStatus {
+ if m != nil {
+ return m.AgentActivityStatus
+ }
+ return nil
+}
+
+// Represents an agent activity status
+type AgentActivityStatus struct {
+ // Types that are valid to be assigned to Status:
+ // *AgentActivityStatus_NginxConfigStatus
+ Status isAgentActivityStatus_Status `protobuf_oneof:"Status"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AgentActivityStatus) Reset() { *m = AgentActivityStatus{} }
+func (m *AgentActivityStatus) String() string { return proto.CompactTextString(m) }
+func (*AgentActivityStatus) ProtoMessage() {}
+func (*AgentActivityStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{3}
+}
+func (m *AgentActivityStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AgentActivityStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AgentActivityStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AgentActivityStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AgentActivityStatus.Merge(m, src)
+}
+func (m *AgentActivityStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *AgentActivityStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_AgentActivityStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AgentActivityStatus proto.InternalMessageInfo
+
+type isAgentActivityStatus_Status interface {
+ isAgentActivityStatus_Status()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type AgentActivityStatus_NginxConfigStatus struct {
+ NginxConfigStatus *NginxConfigStatus `protobuf:"bytes,1,opt,name=nginx_config_status,json=nginxConfigStatus,proto3,oneof" json:"nginx_config_status"`
+}
+
+func (*AgentActivityStatus_NginxConfigStatus) isAgentActivityStatus_Status() {}
+
+func (m *AgentActivityStatus) GetStatus() isAgentActivityStatus_Status {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *AgentActivityStatus) GetNginxConfigStatus() *NginxConfigStatus {
+ if x, ok := m.GetStatus().(*AgentActivityStatus_NginxConfigStatus); ok {
+ return x.NginxConfigStatus
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AgentActivityStatus) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*AgentActivityStatus_NginxConfigStatus)(nil),
+ }
+}
+
+// Represents a NGINX configuration status
+type NginxConfigStatus struct {
+ // CorrelationID is an ID used by the producer of the message to track the flow of events
+ CorrelationId string `protobuf:"bytes,1,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id"`
+ // Provides a status for the NGINX configuration
+ Status NginxConfigStatus_Status `protobuf:"varint,2,opt,name=status,proto3,enum=f5.nginx.agent.sdk.NginxConfigStatus_Status" json:"status"`
+ // Provides a user friendly message to describe the current state of the NGINX configuration.
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message"`
+ // NGINX ID
+ NginxId string `protobuf:"bytes,4,opt,name=nginx_id,json=nginxId,proto3" json:"nginx_id"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxConfigStatus) Reset() { *m = NginxConfigStatus{} }
+func (m *NginxConfigStatus) String() string { return proto.CompactTextString(m) }
+func (*NginxConfigStatus) ProtoMessage() {}
+func (*NginxConfigStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{4}
+}
+func (m *NginxConfigStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxConfigStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxConfigStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxConfigStatus.Merge(m, src)
+}
+func (m *NginxConfigStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxConfigStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxConfigStatus proto.InternalMessageInfo
+
+func (m *NginxConfigStatus) GetCorrelationId() string {
+ if m != nil {
+ return m.CorrelationId
+ }
+ return ""
+}
+
+func (m *NginxConfigStatus) GetStatus() NginxConfigStatus_Status {
+ if m != nil {
+ return m.Status
+ }
+ return NginxConfigStatus_PENDING
+}
+
+func (m *NginxConfigStatus) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *NginxConfigStatus) GetNginxId() string {
+ if m != nil {
+ return m.NginxId
+ }
+ return ""
+}
+
+// Represents a dataplane software health
+type DataplaneSoftwareHealth struct {
+ // Types that are valid to be assigned to Health:
+ // *DataplaneSoftwareHealth_NginxHealth
+ // *DataplaneSoftwareHealth_AppProtectWafHealth
+ Health isDataplaneSoftwareHealth_Health `protobuf_oneof:"health"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DataplaneSoftwareHealth) Reset() { *m = DataplaneSoftwareHealth{} }
+func (m *DataplaneSoftwareHealth) String() string { return proto.CompactTextString(m) }
+// ProtoMessage marks DataplaneSoftwareHealth as a protobuf message type.
+func (*DataplaneSoftwareHealth) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto file descriptor and this
+// message's index path ([]int{5}) within it.
+func (*DataplaneSoftwareHealth) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{5}
+}
+// XXX_Unmarshal decodes wire-format bytes into m via the generated Unmarshal.
+func (m *DataplaneSoftwareHealth) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b. Deterministic output is
+// delegated to the proto runtime; otherwise the generated fast path is used.
+func (m *DataplaneSoftwareHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DataplaneSoftwareHealth.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+// XXX_Merge merges src into m using the proto runtime.
+func (m *DataplaneSoftwareHealth) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataplaneSoftwareHealth.Merge(m, src)
+}
+// XXX_Size reports the encoded size of m in bytes.
+func (m *DataplaneSoftwareHealth) XXX_Size() int {
+ return m.Size()
+}
+// XXX_DiscardUnknown drops any unrecognized fields retained in m.
+func (m *DataplaneSoftwareHealth) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataplaneSoftwareHealth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataplaneSoftwareHealth proto.InternalMessageInfo
+
+// isDataplaneSoftwareHealth_Health is the interface satisfied by all wrapper
+// types that may occupy the "health" oneof of DataplaneSoftwareHealth.
+type isDataplaneSoftwareHealth_Health interface {
+ isDataplaneSoftwareHealth_Health()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+// DataplaneSoftwareHealth_NginxHealth wraps the nginx_health oneof case (field 1).
+type DataplaneSoftwareHealth_NginxHealth struct {
+ NginxHealth *NginxHealth `protobuf:"bytes,1,opt,name=nginx_health,json=nginxHealth,proto3,oneof" json:"nginx_health"`
+}
+// DataplaneSoftwareHealth_AppProtectWafHealth wraps the app_protect_waf_health oneof case (field 2).
+type DataplaneSoftwareHealth_AppProtectWafHealth struct {
+ AppProtectWafHealth *AppProtectWAFHealth `protobuf:"bytes,2,opt,name=app_protect_waf_health,json=appProtectWafHealth,proto3,oneof" json:"app_protect_waf_health"`
+}
+
+func (*DataplaneSoftwareHealth_NginxHealth) isDataplaneSoftwareHealth_Health() {}
+func (*DataplaneSoftwareHealth_AppProtectWafHealth) isDataplaneSoftwareHealth_Health() {}
+
+// GetHealth returns whichever oneof wrapper is currently set, or nil.
+func (m *DataplaneSoftwareHealth) GetHealth() isDataplaneSoftwareHealth_Health {
+ if m != nil {
+ return m.Health
+ }
+ return nil
+}
+
+// GetNginxHealth returns the NginxHealth value if that oneof case is set, else nil.
+func (m *DataplaneSoftwareHealth) GetNginxHealth() *NginxHealth {
+ if x, ok := m.GetHealth().(*DataplaneSoftwareHealth_NginxHealth); ok {
+ return x.NginxHealth
+ }
+ return nil
+}
+
+// GetAppProtectWafHealth returns the AppProtectWAFHealth value if that oneof case is set, else nil.
+func (m *DataplaneSoftwareHealth) GetAppProtectWafHealth() *AppProtectWAFHealth {
+ if x, ok := m.GetHealth().(*DataplaneSoftwareHealth_AppProtectWafHealth); ok {
+ return x.AppProtectWafHealth
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DataplaneSoftwareHealth) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*DataplaneSoftwareHealth_NginxHealth)(nil),
+ (*DataplaneSoftwareHealth_AppProtectWafHealth)(nil),
+ }
+}
+
+// DataplaneUpdate represents a dataplane update pushed by the agent.
+type DataplaneUpdate struct {
+ // Host information
+ Host *HostInfo `protobuf:"bytes,1,opt,name=host,proto3" json:"host"`
+ // List of software details. This includes details about NGINX and any other software installed in the system that the agent is interested in.
+ DataplaneSoftwareDetails []*DataplaneSoftwareDetails `protobuf:"bytes,2,rep,name=dataplane_software_details,json=dataplaneSoftwareDetails,proto3" json:"dataplane_software_details"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+// Reset clears m to the zero value.
+func (m *DataplaneUpdate) Reset() { *m = DataplaneUpdate{} }
+// String renders m in the compact proto text format.
+func (m *DataplaneUpdate) String() string { return proto.CompactTextString(m) }
+func (*DataplaneUpdate) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{6}) within it.
+func (*DataplaneUpdate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{6}
+}
+func (m *DataplaneUpdate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *DataplaneUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DataplaneUpdate.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DataplaneUpdate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataplaneUpdate.Merge(m, src)
+}
+func (m *DataplaneUpdate) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataplaneUpdate) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataplaneUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataplaneUpdate proto.InternalMessageInfo
+
+// GetHost returns the host info, or nil when unset.
+func (m *DataplaneUpdate) GetHost() *HostInfo {
+ if m != nil {
+ return m.Host
+ }
+ return nil
+}
+
+// GetDataplaneSoftwareDetails returns the software details list, or nil when unset.
+func (m *DataplaneUpdate) GetDataplaneSoftwareDetails() []*DataplaneSoftwareDetails {
+ if m != nil {
+ return m.DataplaneSoftwareDetails
+ }
+ return nil
+}
+
+// DownloadRequest represents a request to download a resource; it carries only
+// routing/correlation metadata.
+type DownloadRequest struct {
+ // Metadata information
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DownloadRequest) Reset() { *m = DownloadRequest{} }
+func (m *DownloadRequest) String() string { return proto.CompactTextString(m) }
+func (*DownloadRequest) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{7}) within it.
+func (*DownloadRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{7}
+}
+func (m *DownloadRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *DownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DownloadRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DownloadRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DownloadRequest.Merge(m, src)
+}
+func (m *DownloadRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DownloadRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DownloadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DownloadRequest proto.InternalMessageInfo
+
+// GetMeta returns the request metadata, or nil when unset.
+func (m *DownloadRequest) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+// NginxConfigResponse represents the outcome of applying an NGINX config action.
+type NginxConfigResponse struct {
+ // Command status
+ Status *CommandStatusResponse `protobuf:"bytes,1,opt,name=status,proto3" json:"status"`
+ // NGINX config action
+ Action NginxConfigAction `protobuf:"varint,2,opt,name=action,proto3,enum=f5.nginx.agent.sdk.NginxConfigAction" json:"action"`
+ // NGINX config description
+ ConfigData *ConfigDescriptor `protobuf:"bytes,3,opt,name=config_data,json=configData,proto3" json:"config_data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxConfigResponse) Reset() { *m = NginxConfigResponse{} }
+func (m *NginxConfigResponse) String() string { return proto.CompactTextString(m) }
+func (*NginxConfigResponse) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{8}) within it.
+func (*NginxConfigResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{8}
+}
+func (m *NginxConfigResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *NginxConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxConfigResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxConfigResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxConfigResponse.Merge(m, src)
+}
+func (m *NginxConfigResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxConfigResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxConfigResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxConfigResponse proto.InternalMessageInfo
+
+// GetStatus returns the command status, or nil when unset.
+func (m *NginxConfigResponse) GetStatus() *CommandStatusResponse {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// GetAction returns the config action; UNKNOWN when m is nil.
+func (m *NginxConfigResponse) GetAction() NginxConfigAction {
+ if m != nil {
+ return m.Action
+ }
+ return NginxConfigAction_UNKNOWN
+}
+
+// GetConfigData returns the config descriptor, or nil when unset.
+func (m *NginxConfigResponse) GetConfigData() *ConfigDescriptor {
+ if m != nil {
+ return m.ConfigData
+ }
+ return nil
+}
+
+// UploadStatus represents the state of an in-progress or completed upload.
+type UploadStatus struct {
+ // Metadata information
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // Transfer status
+ Status UploadStatus_TransferStatus `protobuf:"varint,2,opt,name=status,proto3,enum=f5.nginx.agent.sdk.UploadStatus_TransferStatus" json:"status"`
+ // Provides an error message of why the upload failed
+ Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UploadStatus) Reset() { *m = UploadStatus{} }
+func (m *UploadStatus) String() string { return proto.CompactTextString(m) }
+func (*UploadStatus) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{9}) within it.
+func (*UploadStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{9}
+}
+func (m *UploadStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *UploadStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UploadStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *UploadStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UploadStatus.Merge(m, src)
+}
+func (m *UploadStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *UploadStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_UploadStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UploadStatus proto.InternalMessageInfo
+
+// GetMeta returns the upload metadata, or nil when unset.
+func (m *UploadStatus) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+// GetStatus returns the transfer status; UNKNOWN when m is nil.
+func (m *UploadStatus) GetStatus() UploadStatus_TransferStatus {
+ if m != nil {
+ return m.Status
+ }
+ return UploadStatus_UNKNOWN
+}
+
+// GetReason returns the failure reason, or "" when unset.
+func (m *UploadStatus) GetReason() string {
+ if m != nil {
+ return m.Reason
+ }
+ return ""
+}
+
+// DataChunk represents a data chunk: either a transfer header or one payload
+// chunk, carried in the "chunk" oneof.
+type DataChunk struct {
+ // Types that are valid to be assigned to Chunk:
+ // *DataChunk_Header
+ // *DataChunk_Data
+ Chunk isDataChunk_Chunk `protobuf_oneof:"chunk"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DataChunk) Reset() { *m = DataChunk{} }
+func (m *DataChunk) String() string { return proto.CompactTextString(m) }
+func (*DataChunk) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{10}) within it.
+func (*DataChunk) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{10}
+}
+func (m *DataChunk) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *DataChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DataChunk.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DataChunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataChunk.Merge(m, src)
+}
+func (m *DataChunk) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataChunk) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataChunk.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataChunk proto.InternalMessageInfo
+
+// isDataChunk_Chunk is the interface satisfied by the "chunk" oneof wrappers.
+type isDataChunk_Chunk interface {
+ isDataChunk_Chunk()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+// DataChunk_Header wraps the header oneof case (field 1).
+type DataChunk_Header struct {
+ Header *ChunkedResourceHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header"`
+}
+// DataChunk_Data wraps the data oneof case (field 2).
+type DataChunk_Data struct {
+ Data *ChunkedResourceChunk `protobuf:"bytes,2,opt,name=data,proto3,oneof" json:"data"`
+}
+
+func (*DataChunk_Header) isDataChunk_Chunk() {}
+func (*DataChunk_Data) isDataChunk_Chunk() {}
+
+// GetChunk returns whichever oneof wrapper is currently set, or nil.
+func (m *DataChunk) GetChunk() isDataChunk_Chunk {
+ if m != nil {
+ return m.Chunk
+ }
+ return nil
+}
+
+// GetHeader returns the header value if that oneof case is set, else nil.
+func (m *DataChunk) GetHeader() *ChunkedResourceHeader {
+ if x, ok := m.GetChunk().(*DataChunk_Header); ok {
+ return x.Header
+ }
+ return nil
+}
+
+// GetData returns the data value if that oneof case is set, else nil.
+func (m *DataChunk) GetData() *ChunkedResourceChunk {
+ if x, ok := m.GetChunk().(*DataChunk_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DataChunk) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*DataChunk_Header)(nil),
+ (*DataChunk_Data)(nil),
+ }
+}
+
+// ChunkedResourceHeader represents a chunked-resource transfer header,
+// describing how many chunks follow, their size, and a checksum.
+type ChunkedResourceHeader struct {
+ // Metadata information
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // Number of chunks expected in the transfer
+ Chunks int32 `protobuf:"varint,2,opt,name=chunks,proto3" json:"chunks"`
+ // Chunk checksum
+ Checksum string `protobuf:"bytes,3,opt,name=checksum,proto3" json:"checksum"`
+ // Chunk size
+ ChunkSize int32 `protobuf:"varint,4,opt,name=chunk_size,json=chunkSize,proto3" json:"chunk_size"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ChunkedResourceHeader) Reset() { *m = ChunkedResourceHeader{} }
+func (m *ChunkedResourceHeader) String() string { return proto.CompactTextString(m) }
+func (*ChunkedResourceHeader) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{11}) within it.
+func (*ChunkedResourceHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{11}
+}
+func (m *ChunkedResourceHeader) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *ChunkedResourceHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ChunkedResourceHeader.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ChunkedResourceHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ChunkedResourceHeader.Merge(m, src)
+}
+func (m *ChunkedResourceHeader) XXX_Size() int {
+ return m.Size()
+}
+func (m *ChunkedResourceHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_ChunkedResourceHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChunkedResourceHeader proto.InternalMessageInfo
+
+// GetMeta returns the header metadata, or nil when unset.
+func (m *ChunkedResourceHeader) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+// GetChunks returns the expected chunk count; 0 when m is nil.
+func (m *ChunkedResourceHeader) GetChunks() int32 {
+ if m != nil {
+ return m.Chunks
+ }
+ return 0
+}
+
+// GetChecksum returns the transfer checksum, or "" when unset.
+func (m *ChunkedResourceHeader) GetChecksum() string {
+ if m != nil {
+ return m.Checksum
+ }
+ return ""
+}
+
+// GetChunkSize returns the per-chunk size in bytes; 0 when m is nil.
+func (m *ChunkedResourceHeader) GetChunkSize() int32 {
+ if m != nil {
+ return m.ChunkSize
+ }
+ return 0
+}
+
+// ChunkedResourceChunk represents one chunk of a chunked resource transfer,
+// identified by its sequence ID within the transfer described by the header.
+type ChunkedResourceChunk struct {
+ // Metadata information
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // Chunk ID
+ ChunkId int32 `protobuf:"varint,2,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id"`
+ // Chunk data
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ChunkedResourceChunk) Reset() { *m = ChunkedResourceChunk{} }
+func (m *ChunkedResourceChunk) String() string { return proto.CompactTextString(m) }
+func (*ChunkedResourceChunk) ProtoMessage() {}
+// Descriptor returns the gzipped command.proto descriptor and this message's
+// index path ([]int{12}) within it.
+func (*ChunkedResourceChunk) Descriptor() ([]byte, []int) {
+ return fileDescriptor_213c0bb044472049, []int{12}
+}
+func (m *ChunkedResourceChunk) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+// XXX_Marshal appends the wire encoding of m to b; deterministic output is
+// delegated to the proto runtime.
+func (m *ChunkedResourceChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ChunkedResourceChunk.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ChunkedResourceChunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ChunkedResourceChunk.Merge(m, src)
+}
+func (m *ChunkedResourceChunk) XXX_Size() int {
+ return m.Size()
+}
+func (m *ChunkedResourceChunk) XXX_DiscardUnknown() {
+ xxx_messageInfo_ChunkedResourceChunk.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChunkedResourceChunk proto.InternalMessageInfo
+
+// GetMeta returns the chunk metadata, or nil when unset.
+func (m *ChunkedResourceChunk) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+// GetChunkId returns this chunk's sequence ID; 0 when m is nil.
+func (m *ChunkedResourceChunk) GetChunkId() int32 {
+ if m != nil {
+ return m.ChunkId
+ }
+ return 0
+}
+
+// GetData returns the raw chunk payload, or nil when unset.
+func (m *ChunkedResourceChunk) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+// init registers the generated enums and message types with the proto runtime
+// under their fully-qualified f5.nginx.agent.sdk names.
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.Command_CommandType", Command_CommandType_name, Command_CommandType_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.CommandStatusResponse_CommandStatus", CommandStatusResponse_CommandStatus_name, CommandStatusResponse_CommandStatus_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.CommandStatusResponse_CommandErrorCode", CommandStatusResponse_CommandErrorCode_name, CommandStatusResponse_CommandErrorCode_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.NginxConfigStatus_Status", NginxConfigStatus_Status_name, NginxConfigStatus_Status_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.UploadStatus_TransferStatus", UploadStatus_TransferStatus_name, UploadStatus_TransferStatus_value)
+ proto.RegisterType((*Command)(nil), "f5.nginx.agent.sdk.Command")
+ proto.RegisterType((*CommandStatusResponse)(nil), "f5.nginx.agent.sdk.CommandStatusResponse")
+ proto.RegisterType((*DataplaneStatus)(nil), "f5.nginx.agent.sdk.DataplaneStatus")
+ proto.RegisterType((*AgentActivityStatus)(nil), "f5.nginx.agent.sdk.AgentActivityStatus")
+ proto.RegisterType((*NginxConfigStatus)(nil), "f5.nginx.agent.sdk.NginxConfigStatus")
+ proto.RegisterType((*DataplaneSoftwareHealth)(nil), "f5.nginx.agent.sdk.DataplaneSoftwareHealth")
+ proto.RegisterType((*DataplaneUpdate)(nil), "f5.nginx.agent.sdk.DataplaneUpdate")
+ proto.RegisterType((*DownloadRequest)(nil), "f5.nginx.agent.sdk.DownloadRequest")
+ proto.RegisterType((*NginxConfigResponse)(nil), "f5.nginx.agent.sdk.NginxConfigResponse")
+ proto.RegisterType((*UploadStatus)(nil), "f5.nginx.agent.sdk.UploadStatus")
+ proto.RegisterType((*DataChunk)(nil), "f5.nginx.agent.sdk.DataChunk")
+ proto.RegisterType((*ChunkedResourceHeader)(nil), "f5.nginx.agent.sdk.ChunkedResourceHeader")
+ proto.RegisterType((*ChunkedResourceChunk)(nil), "f5.nginx.agent.sdk.ChunkedResourceChunk")
+}
+
+// init registers the compressed command.proto file descriptor.
+func init() { proto.RegisterFile("command.proto", fileDescriptor_213c0bb044472049) }
+
+var fileDescriptor_213c0bb044472049 = []byte{
+ // 1534 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6e, 0xdb, 0x46,
+ 0x17, 0x26, 0x15, 0xeb, 0x76, 0x24, 0xdb, 0xca, 0xd8, 0x49, 0x14, 0x23, 0x30, 0x0d, 0xfe, 0x7f,
+ 0x6a, 0xa7, 0x4d, 0x25, 0xd4, 0x41, 0x11, 0x34, 0x59, 0x59, 0x96, 0x13, 0x0a, 0x89, 0x65, 0x63,
+ 0x12, 0x27, 0x40, 0x8a, 0x42, 0x60, 0xc4, 0x91, 0x4c, 0xd8, 0x22, 0x59, 0x92, 0x72, 0xe2, 0xa0,
+ 0xfb, 0xa2, 0x45, 0x37, 0x5d, 0x74, 0xd1, 0xf6, 0x21, 0xfa, 0x1a, 0x5d, 0x66, 0xdd, 0x05, 0x51,
+ 0x64, 0xc9, 0x07, 0x68, 0x17, 0xdd, 0x14, 0x73, 0xa1, 0x44, 0x49, 0x94, 0xec, 0xd4, 0x45, 0x37,
+ 0x9a, 0xe1, 0xf0, 0x3b, 0xdf, 0xb9, 0xcc, 0xcc, 0x37, 0x43, 0xc1, 0x7c, 0xdb, 0xee, 0xf5, 0x74,
+ 0xcb, 0xa8, 0x38, 0xae, 0xed, 0xdb, 0x08, 0x75, 0x3e, 0xad, 0x58, 0x5d, 0xd3, 0x7a, 0x5d, 0xd1,
+ 0xbb, 0xc4, 0xf2, 0x2b, 0x9e, 0x71, 0xb4, 0x52, 0xe0, 0x5d, 0x06, 0x58, 0x29, 0x52, 0xbc, 0x6d,
+ 0x0d, 0x9f, 0xac, 0x8e, 0xd9, 0x15, 0x4f, 0xd7, 0x0d, 0xa7, 0xe5, 0xd9, 0x1d, 0xff, 0x95, 0xee,
+ 0x92, 0x96, 0x41, 0x7c, 0xdd, 0x3c, 0xf6, 0xc4, 0x2b, 0x44, 0x4e, 0x88, 0xe5, 0x7b, 0x55, 0xd6,
+ 0x88, 0x31, 0xe8, 0xda, 0x5d, 0x3b, 0xea, 0x1f, 0xda, 0x5e, 0x34, 0x9e, 0xb7, 0x74, 0x47, 0x74,
+ 0x0b, 0x3c, 0x16, 0xf6, 0xa0, 0xfe, 0x05, 0x90, 0xdd, 0xe6, 0xd1, 0xa2, 0x7b, 0x30, 0xd7, 0x23,
+ 0xbe, 0x5e, 0x96, 0xd7, 0xe4, 0x8d, 0xc2, 0xe6, 0x8d, 0xca, 0x64, 0xd8, 0x95, 0x5d, 0xe2, 0xeb,
+ 0x86, 0xee, 0xeb, 0xb5, 0x5c, 0x18, 0x28, 0x0c, 0x8d, 0xd9, 0x2f, 0xda, 0x81, 0x39, 0xff, 0xd4,
+ 0x21, 0xe5, 0xd4, 0x9a, 0xbc, 0xb1, 0xb0, 0xb9, 0x9e, 0x64, 0x2b, 0xdc, 0x44, 0xed, 0xd3, 0x53,
+ 0x87, 0x70, 0x1a, 0x6a, 0x88, 0xd9, 0x2f, 0x7a, 0x01, 0xd0, 0xee, 0x19, 0x2d, 0xcf, 0xd7, 0xfd,
+ 0xbe, 0x57, 0xbe, 0xc4, 0x02, 0xb9, 0x35, 0x83, 0xec, 0x09, 0x03, 0x62, 0xe2, 0x39, 0xb6, 0xe5,
+ 0x91, 0xda, 0x42, 0x18, 0x28, 0x31, 0x02, 0x4d, 0xc2, 0xf9, 0x76, 0x4f, 0x80, 0xd0, 0x33, 0x28,
+ 0x32, 0x96, 0x16, 0xaf, 0x6f, 0x79, 0x8e, 0xb1, 0x2b, 0x49, 0xec, 0x4d, 0xfa, 0xbc, 0xcd, 0x60,
+ 0xb5, 0x52, 0x18, 0x28, 0x23, 0x86, 0x9a, 0x84, 0x79, 0x09, 0x39, 0x00, 0xbd, 0x86, 0x2b, 0xf1,
+ 0xd7, 0x2d, 0x57, 0x44, 0x53, 0x4e, 0x33, 0x07, 0xeb, 0x67, 0x38, 0x18, 0x04, 0x7f, 0x3d, 0x0c,
+ 0x94, 0x64, 0x26, 0x4d, 0xc2, 0x4b, 0xd6, 0xa4, 0x05, 0xf5, 0xcc, 0x28, 0x29, 0xde, 0x22, 0x6d,
+ 0xbf, 0xe5, 0x92, 0x2f, 0xfb, 0xc4, 0xf3, 0xcb, 0x99, 0xe9, 0x9e, 0xb7, 0x68, 0x6f, 0x9b, 0xe3,
+ 0x31, 0x87, 0x73, 0xcf, 0x89, 0x4c, 0xd4, 0xb3, 0x3e, 0x69, 0x81, 0xbe, 0x82, 0xab, 0xe3, 0x78,
+ 0x91, 0x74, 0x96, 0xb9, 0xde, 0x38, 0xdb, 0xb5, 0xc8, 0x7a, 0x25, 0x0c, 0x94, 0x29, 0x5c, 0x9a,
+ 0x84, 0x97, 0xf5, 0x04, 0x1b, 0xe4, 0xc3, 0xf2, 0xc0, 0x82, 0xd7, 0x89, 0xa7, 0x9d, 0x63, 0xbe,
+ 0x3f, 0x98, 0xe5, 0x9b, 0x95, 0x8f, 0x67, 0x5d, 0x0e, 0x03, 0x25, 0x91, 0x47, 0x93, 0x30, 0xd2,
+ 0x27, 0xf0, 0x74, 0xfd, 0xc4, 0xd1, 0xe5, 0xfc, 0xf4, 0xf5, 0x13, 0xf3, 0xc6, 0xd7, 0x4f, 0xdc,
+ 0x90, 0xae, 0x9f, 0x18, 0x3d, 0xea, 0x40, 0x89, 0x6e, 0x29, 0xe7, 0x58, 0xb7, 0x48, 0xb4, 0xf2,
+ 0x0b, 0x8c, 0xfb, 0x7f, 0x49, 0xdc, 0xf5, 0x08, 0xcb, 0x97, 0x75, 0x6d, 0x39, 0x0c, 0x94, 0x09,
+ 0x02, 0x4d, 0xc2, 0x8b, 0xc6, 0x28, 0x10, 0x7d, 0x01, 0x45, 0xa6, 0x14, 0x2d, 0x97, 0x38, 0xb6,
+ 0xeb, 0x97, 0x8b, 0xd3, 0xab, 0xc5, 0x85, 0xa5, 0xb2, 0x43, 0x1b, 0xcc, 0xd0, 0x3c, 0x8d, 0xb8,
+ 0x3d, 0x4d, 0x83, 0x0c, 0x01, 0xe8, 0x3b, 0x19, 0x56, 0x62, 0x61, 0x8c, 0x49, 0x56, 0x79, 0x9e,
+ 0x79, 0xbb, 0x3d, 0x3b, 0x23, 0x61, 0x54, 0xe7, 0x36, 0xb5, 0xd5, 0x30, 0x50, 0x66, 0x70, 0x6a,
+ 0x12, 0x2e, 0x1b, 0x53, 0x6c, 0x47, 0xab, 0xda, 0x77, 0x0c, 0xdd, 0x27, 0xe5, 0x85, 0x73, 0x54,
+ 0xf5, 0x80, 0x41, 0xc7, 0xab, 0xca, 0x09, 0x46, 0xaa, 0xca, 0x81, 0xea, 0x1d, 0x28, 0xc4, 0x04,
+ 0x0d, 0x01, 0x64, 0x9a, 0x7b, 0x78, 0x77, 0xeb, 0x71, 0x49, 0x42, 0x45, 0xc8, 0xd5, 0xf7, 0x9e,
+ 0x37, 0x1f, 0xef, 0x6d, 0xd5, 0x4b, 0x32, 0x7d, 0x73, 0xb0, 0xcf, 0xfa, 0xa9, 0x5a, 0x06, 0xe6,
+ 0x28, 0x8f, 0xfa, 0xc3, 0x25, 0xb8, 0x92, 0xa8, 0x64, 0xe8, 0x73, 0xc8, 0x88, 0xa5, 0x20, 0x33,
+ 0x45, 0xbd, 0x7b, 0x6e, 0x11, 0x1c, 0x1d, 0xad, 0x41, 0x18, 0x28, 0x82, 0x0a, 0x8b, 0x16, 0x99,
+ 0x00, 0xc4, 0x75, 0x6d, 0xb7, 0xd5, 0xb6, 0x8d, 0x48, 0xb2, 0xef, 0xbd, 0xb7, 0x83, 0x1d, 0x4a,
+ 0xb1, 0x6d, 0x1b, 0x42, 0x76, 0x87, 0x8c, 0x38, 0x4f, 0xa2, 0x57, 0xe8, 0x26, 0x64, 0x7b, 0xc4,
+ 0xf3, 0xf4, 0x2e, 0x61, 0x6a, 0x9e, 0xaf, 0x15, 0xc2, 0x40, 0x89, 0x86, 0x70, 0xd4, 0x41, 0x0a,
+ 0xa4, 0x99, 0x0d, 0x13, 0xe5, 0x7c, 0x2d, 0x1f, 0x06, 0x0a, 0x1f, 0xc0, 0xbc, 0x51, 0xef, 0xc3,
+ 0xfc, 0x48, 0x30, 0x68, 0x11, 0x0a, 0xdb, 0xbb, 0xf5, 0xd6, 0x41, 0xf3, 0x51, 0x73, 0xef, 0x79,
+ 0xb3, 0x24, 0xd1, 0xfa, 0xd2, 0x81, 0xbd, 0x47, 0x25, 0x19, 0xcd, 0x43, 0x9e, 0xf6, 0x77, 0x30,
+ 0xde, 0xc3, 0xa5, 0x94, 0x5a, 0x85, 0xd2, 0x78, 0xcc, 0x14, 0xbe, 0x83, 0x31, 0x85, 0x4b, 0x94,
+ 0x8b, 0xf6, 0x23, 0x2e, 0x59, 0xfd, 0x39, 0x0d, 0x8b, 0x63, 0xfb, 0x0c, 0x7d, 0x08, 0x79, 0xef,
+ 0xd4, 0xf3, 0x49, 0xaf, 0x65, 0x1a, 0x6c, 0x52, 0xf2, 0xb5, 0xf9, 0x30, 0x50, 0x86, 0x83, 0x38,
+ 0xc7, 0xbb, 0x0d, 0x03, 0x3d, 0x84, 0x6c, 0xb4, 0xee, 0x53, 0x6b, 0x97, 0x36, 0x0a, 0x9b, 0x6b,
+ 0x53, 0x0f, 0x81, 0x68, 0xad, 0xb3, 0xba, 0x08, 0x23, 0x1c, 0x75, 0xe8, 0x91, 0x4c, 0x0f, 0x71,
+ 0x71, 0x12, 0x26, 0x1e, 0xc9, 0x9a, 0xed, 0xf9, 0x0d, 0xab, 0x63, 0xf3, 0xb3, 0x94, 0xa2, 0x31,
+ 0xfb, 0x45, 0x0f, 0x20, 0x7b, 0x48, 0xf4, 0x63, 0xff, 0xd0, 0x2b, 0xa7, 0x59, 0x10, 0xd3, 0x8f,
+ 0x3a, 0x8d, 0xe1, 0x78, 0x0c, 0xc2, 0x06, 0x47, 0x1d, 0xf4, 0xed, 0xec, 0x8d, 0x9d, 0x61, 0xdc,
+ 0xff, 0xea, 0xc6, 0x9e, 0xb1, 0xad, 0xbf, 0x49, 0x0e, 0x26, 0x4a, 0x34, 0xcb, 0x82, 0xf9, 0xe8,
+ 0x5c, 0xc1, 0x88, 0xa4, 0xa7, 0xc5, 0x12, 0xd5, 0x61, 0x32, 0x16, 0x4d, 0x14, 0xe6, 0x24, 0x3a,
+ 0x7e, 0xf5, 0xb6, 0x6f, 0x9e, 0x98, 0xfe, 0x69, 0xa4, 0xde, 0x39, 0x16, 0xc5, 0xf4, 0xe3, 0x77,
+ 0x4b, 0xe0, 0xc5, 0x16, 0x8d, 0x1d, 0xbf, 0x63, 0x4c, 0xe2, 0xf0, 0x1d, 0xc5, 0xab, 0xdf, 0xcb,
+ 0xb0, 0x94, 0xc0, 0x83, 0x1c, 0x58, 0x1a, 0xb9, 0x3e, 0xc4, 0x04, 0xa4, 0xb0, 0x79, 0xf3, 0x8c,
+ 0x6b, 0x88, 0x88, 0xe5, 0x5a, 0x18, 0x28, 0x49, 0x2c, 0x9a, 0x84, 0x2f, 0x5b, 0x13, 0xe8, 0x1c,
+ 0x64, 0x44, 0x4c, 0x3f, 0xa5, 0xe0, 0xf2, 0x04, 0x1b, 0xfa, 0x0c, 0x16, 0xda, 0xb6, 0xeb, 0x92,
+ 0x63, 0xdd, 0x37, 0x6d, 0x6b, 0xb8, 0x71, 0x50, 0x18, 0x28, 0x63, 0x6f, 0xf0, 0x7c, 0xec, 0xb9,
+ 0x61, 0xa0, 0xfd, 0x81, 0x00, 0x72, 0x7d, 0xba, 0x7d, 0xae, 0xf8, 0x2b, 0x33, 0x54, 0xef, 0x9c,
+ 0x52, 0xb4, 0x0e, 0x39, 0x9e, 0xbf, 0x69, 0x08, 0x35, 0x2a, 0x86, 0x81, 0x32, 0x18, 0xc3, 0x59,
+ 0xd6, 0x6b, 0x18, 0xea, 0x46, 0x94, 0x3c, 0x2a, 0x40, 0x76, 0x7f, 0xa7, 0x59, 0x6f, 0x34, 0x1f,
+ 0x96, 0x24, 0x94, 0x81, 0x14, 0xd3, 0xa0, 0x3c, 0xa4, 0x23, 0xfd, 0xf9, 0x53, 0x86, 0x6b, 0x53,
+ 0x96, 0xdf, 0xf0, 0x56, 0xca, 0xd7, 0x9b, 0x98, 0xad, 0x33, 0xb7, 0x6a, 0xec, 0x56, 0xca, 0x0d,
+ 0x07, 0xb7, 0x52, 0xc1, 0xfb, 0x06, 0xae, 0xea, 0x8e, 0xd3, 0xa2, 0xb7, 0x7c, 0x7a, 0xa7, 0x7a,
+ 0xa5, 0x77, 0x22, 0x0f, 0xa9, 0x19, 0x97, 0x43, 0xc7, 0xd9, 0xe7, 0x06, 0xcf, 0xb7, 0x1e, 0x08,
+ 0x4f, 0xfc, 0x82, 0x96, 0x48, 0xc5, 0x6e, 0x87, 0x43, 0x13, 0xbd, 0x23, 0x4c, 0x72, 0x90, 0xe1,
+ 0x00, 0xf5, 0x37, 0x39, 0x26, 0xa4, 0xfc, 0xc4, 0x1c, 0x68, 0x9a, 0xfc, 0x0f, 0x34, 0xed, 0x0c,
+ 0x2d, 0x4a, 0xfd, 0x97, 0x5a, 0xa4, 0xee, 0xc2, 0x62, 0xdd, 0x7e, 0x65, 0x1d, 0xdb, 0xba, 0x11,
+ 0xdd, 0x11, 0x2f, 0xf0, 0x09, 0xa5, 0x7e, 0x9d, 0x82, 0xa5, 0x84, 0xef, 0x02, 0xb4, 0x3b, 0x72,
+ 0x15, 0x78, 0xaf, 0xef, 0xa1, 0xa4, 0x6d, 0xd0, 0x80, 0x0c, 0x55, 0x19, 0xdb, 0x12, 0x1b, 0xeb,
+ 0x2c, 0x61, 0xd8, 0x62, 0x60, 0x4e, 0xc5, 0x0d, 0xb1, 0x68, 0xd1, 0x33, 0x28, 0x08, 0x91, 0xa0,
+ 0x09, 0x89, 0x43, 0xea, 0xff, 0xc9, 0xe1, 0x51, 0x58, 0x9d, 0x78, 0x6d, 0xd7, 0x74, 0x7c, 0xdb,
+ 0xad, 0x2d, 0x86, 0x81, 0x12, 0x37, 0xc6, 0xc0, 0x1f, 0xe8, 0x34, 0xa9, 0x7f, 0xc8, 0x50, 0x3c,
+ 0x70, 0x68, 0x5d, 0xc5, 0x06, 0xbb, 0xc8, 0x97, 0xe9, 0x93, 0x31, 0x21, 0xa9, 0x26, 0x59, 0xc7,
+ 0xbd, 0x55, 0x9e, 0xba, 0xba, 0xe5, 0x75, 0x88, 0x3b, 0x43, 0x4b, 0x54, 0xc8, 0xb8, 0x44, 0xf7,
+ 0x6c, 0x4b, 0x48, 0x09, 0xc3, 0xf0, 0x11, 0x2c, 0x5a, 0xf5, 0x13, 0x58, 0x18, 0x65, 0xa2, 0x3a,
+ 0x31, 0xbc, 0xaf, 0x44, 0x3a, 0x01, 0x90, 0x79, 0xb0, 0xd5, 0x78, 0xbc, 0x53, 0x2f, 0xa5, 0xd4,
+ 0x5f, 0x64, 0xc8, 0xd3, 0x0a, 0x6c, 0x1f, 0xf6, 0xad, 0x23, 0xb4, 0xc7, 0xb6, 0x91, 0x41, 0xdc,
+ 0x99, 0x13, 0x4f, 0xa1, 0xc4, 0xc0, 0xc4, 0xb3, 0xfb, 0x6e, 0x9b, 0xaa, 0x8a, 0x41, 0x5c, 0x1e,
+ 0x0f, 0x37, 0xd6, 0x24, 0x2c, 0x7a, 0x48, 0xe3, 0xd7, 0x4e, 0xa1, 0x00, 0x1b, 0xe7, 0xa0, 0x63,
+ 0x8f, 0xbc, 0xa4, 0xd4, 0x52, 0x93, 0x30, 0x6b, 0x6b, 0x59, 0x48, 0xb7, 0xe9, 0x2b, 0xf5, 0xad,
+ 0x0c, 0x57, 0x12, 0x43, 0xb8, 0xd0, 0x9c, 0xa9, 0x90, 0x61, 0xf4, 0x7c, 0xce, 0xd2, 0x3c, 0x1d,
+ 0x3e, 0x82, 0x45, 0x8b, 0x36, 0x20, 0xd7, 0x3e, 0x24, 0xed, 0x23, 0xaf, 0xdf, 0x13, 0x93, 0xc0,
+ 0x74, 0x3a, 0x1a, 0xc3, 0x83, 0x1e, 0xfa, 0x18, 0x80, 0xd9, 0xb4, 0x3c, 0xf3, 0x0d, 0x61, 0x9a,
+ 0x9e, 0x16, 0xff, 0x14, 0x0c, 0x46, 0x71, 0x9e, 0xf5, 0x9f, 0x98, 0x6f, 0x88, 0xfa, 0xa3, 0x0c,
+ 0xcb, 0x49, 0x65, 0xb8, 0x50, 0x46, 0xeb, 0x34, 0x5a, 0xea, 0xcd, 0x34, 0x44, 0x4e, 0x22, 0x5a,
+ 0x3e, 0x86, 0xb3, 0xac, 0xd7, 0x30, 0xd0, 0x0d, 0x31, 0x47, 0x34, 0xa5, 0xe2, 0xb0, 0xf2, 0xa2,
+ 0xee, 0x77, 0x7f, 0x7d, 0xb7, 0x2a, 0xbf, 0x7d, 0xb7, 0x2a, 0xff, 0xfe, 0x6e, 0x55, 0x7e, 0x71,
+ 0xab, 0x6b, 0xfa, 0x87, 0xfd, 0x97, 0x95, 0xb6, 0xdd, 0xab, 0xb2, 0x48, 0xaa, 0x2c, 0x92, 0xaa,
+ 0x67, 0x1c, 0x55, 0x4f, 0x36, 0xab, 0xec, 0x1f, 0x9e, 0xfb, 0xec, 0xf7, 0x65, 0x86, 0x35, 0x77,
+ 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x53, 0x4d, 0x33, 0x9b, 0x12, 0x00, 0x00,
+}
+
+// Marshal encodes m to a freshly allocated wire-format buffer.
+func (m *Command) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+// MarshalTo encodes m into dAtA, which must be at least m.Size() bytes.
+func (m *Command) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
+// reverse order (highest field number first) so each length prefix is known
+// before it is written. Returns the number of bytes written.
+func (m *Command) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ // The oneof wrapper marshals itself, including its own field tag.
+ if m.Data != nil {
+ {
+ size := m.Data.Size()
+ i -= size
+ if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if m.Type != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+// The following MarshalTo/MarshalToSizedBuffer pairs encode each case of the
+// Command "data" oneof. Each writes its embedded message back-to-front into
+// the tail of dAtA, then its length prefix, then its one-byte field tag
+// (field number << 3 | wire type 2), e.g. 0x1a = field 3, length-delimited.
+func (m *Command_CmdStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_CmdStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.CmdStatus != nil {
+ {
+ size, err := m.CmdStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_NginxConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_NginxConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NginxConfig != nil {
+ {
+ size, err := m.NginxConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_NginxConfigResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_NginxConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NginxConfigResponse != nil {
+ {
+ size, err := m.NginxConfigResponse.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_AgentConnectRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_AgentConnectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AgentConnectRequest != nil {
+ {
+ size, err := m.AgentConnectRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_AgentConnectResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_AgentConnectResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AgentConnectResponse != nil {
+ {
+ size, err := m.AgentConnectResponse.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_AgentConfigRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_AgentConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AgentConfigRequest != nil {
+ {
+ size, err := m.AgentConfigRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_AgentConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_AgentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AgentConfig != nil {
+ {
+ size, err := m.AgentConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_DataplaneStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_DataplaneStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DataplaneStatus != nil {
+ {
+ size, err := m.DataplaneStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_EventReport) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_EventReport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.EventReport != nil {
+ {
+ size, err := m.EventReport.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_DataplaneSoftwareDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_DataplaneSoftwareDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DataplaneSoftwareDetails != nil {
+ {
+ size, err := m.DataplaneSoftwareDetails.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Command_DataplaneUpdate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Command_DataplaneUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DataplaneUpdate != nil {
+ {
+ size, err := m.DataplaneUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ return len(dAtA) - i, nil
+}
+func (m *CommandStatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CommandStatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CommandStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Error) > 0 {
+ i -= len(m.Error)
+ copy(dAtA[i:], m.Error)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Error)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ErrorCode != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.ErrorCode))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Status != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataplaneStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataplaneStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.AgentActivityStatus) > 0 {
+ for iNdEx := len(m.AgentActivityStatus) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AgentActivityStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.DataplaneSoftwareHealths) > 0 {
+ for iNdEx := len(m.DataplaneSoftwareHealths) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataplaneSoftwareHealths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for iNdEx := len(m.DataplaneSoftwareDetails) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataplaneSoftwareDetails[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.Healths) > 0 {
+ for iNdEx := len(m.Healths) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Healths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Host != nil {
+ {
+ size, err := m.Host.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Details) > 0 {
+ for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Details[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.SystemId) > 0 {
+ i -= len(m.SystemId)
+ copy(dAtA[i:], m.SystemId)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.SystemId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentActivityStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AgentActivityStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentActivityStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Status != nil {
+ {
+ size := m.Status.Size()
+ i -= size
+ if _, err := m.Status.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AgentActivityStatus_NginxConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AgentActivityStatus_NginxConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NginxConfigStatus != nil {
+ {
+ size, err := m.NginxConfigStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *NginxConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.NginxId) > 0 {
+ i -= len(m.NginxId)
+ copy(dAtA[i:], m.NginxId)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.NginxId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Status != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.CorrelationId) > 0 {
+ i -= len(m.CorrelationId)
+ copy(dAtA[i:], m.CorrelationId)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.CorrelationId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataplaneSoftwareHealth) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataplaneSoftwareHealth) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Health != nil {
+ {
+ size := m.Health.Size()
+ i -= size
+ if _, err := m.Health.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataplaneSoftwareHealth_NginxHealth) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareHealth_NginxHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NginxHealth != nil {
+ {
+ size, err := m.NginxHealth.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *DataplaneSoftwareHealth_AppProtectWafHealth) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareHealth_AppProtectWafHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AppProtectWafHealth != nil {
+ {
+ size, err := m.AppProtectWafHealth.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *DataplaneUpdate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataplaneUpdate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for iNdEx := len(m.DataplaneSoftwareDetails) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataplaneSoftwareDetails[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Host != nil {
+ {
+ size, err := m.Host.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DownloadRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DownloadRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DownloadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxConfigResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxConfigResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ConfigData != nil {
+ {
+ size, err := m.ConfigData.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Action != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Status != nil {
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UploadStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UploadStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UploadStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Reason) > 0 {
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Status != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataChunk) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataChunk) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Chunk != nil {
+ {
+ size := m.Chunk.Size()
+ i -= size
+ if _, err := m.Chunk.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataChunk_Header) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataChunk_Header) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *DataChunk_Data) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataChunk_Data) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.Data != nil {
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ChunkedResourceHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ChunkedResourceHeader) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ChunkedResourceHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ChunkSize != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.ChunkSize))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Checksum) > 0 {
+ i -= len(m.Checksum)
+ copy(dAtA[i:], m.Checksum)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Checksum)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Chunks != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.Chunks))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ChunkedResourceChunk) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ChunkedResourceChunk) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ChunkedResourceChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintCommand(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ChunkId != 0 {
+ i = encodeVarintCommand(dAtA, i, uint64(m.ChunkId))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommand(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintCommand(dAtA []byte, offset int, v uint64) int {
+ offset -= sovCommand(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Command) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.Type != 0 {
+ n += 1 + sovCommand(uint64(m.Type))
+ }
+ if m.Data != nil {
+ n += m.Data.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Command_CmdStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CmdStatus != nil {
+ l = m.CmdStatus.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_NginxConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NginxConfig != nil {
+ l = m.NginxConfig.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_NginxConfigResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NginxConfigResponse != nil {
+ l = m.NginxConfigResponse.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_AgentConnectRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AgentConnectRequest != nil {
+ l = m.AgentConnectRequest.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_AgentConnectResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AgentConnectResponse != nil {
+ l = m.AgentConnectResponse.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_AgentConfigRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AgentConfigRequest != nil {
+ l = m.AgentConfigRequest.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_AgentConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AgentConfig != nil {
+ l = m.AgentConfig.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_DataplaneStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DataplaneStatus != nil {
+ l = m.DataplaneStatus.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_EventReport) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.EventReport != nil {
+ l = m.EventReport.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_DataplaneSoftwareDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DataplaneSoftwareDetails != nil {
+ l = m.DataplaneSoftwareDetails.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *Command_DataplaneUpdate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DataplaneUpdate != nil {
+ l = m.DataplaneUpdate.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *CommandStatusResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Status != 0 {
+ n += 1 + sovCommand(uint64(m.Status))
+ }
+ if m.ErrorCode != 0 {
+ n += 1 + sovCommand(uint64(m.ErrorCode))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataplaneStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SystemId)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if len(m.Details) > 0 {
+ for _, e := range m.Details {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if m.Host != nil {
+ l = m.Host.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if len(m.Healths) > 0 {
+ for _, e := range m.Healths {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for _, e := range m.DataplaneSoftwareDetails {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if len(m.DataplaneSoftwareHealths) > 0 {
+ for _, e := range m.DataplaneSoftwareHealths {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if len(m.AgentActivityStatus) > 0 {
+ for _, e := range m.AgentActivityStatus {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentActivityStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Status != nil {
+ n += m.Status.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AgentActivityStatus_NginxConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NginxConfigStatus != nil {
+ l = m.NginxConfigStatus.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *NginxConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CorrelationId)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.Status != 0 {
+ n += 1 + sovCommand(uint64(m.Status))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ l = len(m.NginxId)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataplaneSoftwareHealth) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Health != nil {
+ n += m.Health.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataplaneSoftwareHealth_NginxHealth) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NginxHealth != nil {
+ l = m.NginxHealth.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *DataplaneSoftwareHealth_AppProtectWafHealth) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AppProtectWafHealth != nil {
+ l = m.AppProtectWafHealth.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *DataplaneUpdate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Host != nil {
+ l = m.Host.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if len(m.DataplaneSoftwareDetails) > 0 {
+ for _, e := range m.DataplaneSoftwareDetails {
+ l = e.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DownloadRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NginxConfigResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Status != nil {
+ l = m.Status.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.Action != 0 {
+ n += 1 + sovCommand(uint64(m.Action))
+ }
+ if m.ConfigData != nil {
+ l = m.ConfigData.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UploadStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.Status != 0 {
+ n += 1 + sovCommand(uint64(m.Status))
+ }
+ l = len(m.Reason)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataChunk) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Chunk != nil {
+ n += m.Chunk.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataChunk_Header) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *DataChunk_Data) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Data != nil {
+ l = m.Data.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ return n
+}
+func (m *ChunkedResourceHeader) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.Chunks != 0 {
+ n += 1 + sovCommand(uint64(m.Chunks))
+ }
+ l = len(m.Checksum)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.ChunkSize != 0 {
+ n += 1 + sovCommand(uint64(m.ChunkSize))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ChunkedResourceChunk) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.ChunkId != 0 {
+ n += 1 + sovCommand(uint64(m.ChunkId))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovCommand(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovCommand(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozCommand(x uint64) (n int) {
+ return sovCommand(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Command) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Command: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Command: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Meta == nil {
+ m.Meta = &Metadata{}
+ }
+ if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= Command_CommandType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CmdStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &CommandStatusResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_CmdStatus{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &NginxConfig{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_NginxConfig{v}
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxConfigResponse", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &NginxConfigResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_NginxConfigResponse{v}
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentConnectRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &AgentConnectRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_AgentConnectRequest{v}
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentConnectResponse", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &AgentConnectResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_AgentConnectResponse{v}
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentConfigRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &AgentConfigRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_AgentConfigRequest{v}
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &AgentConfig{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_AgentConfig{v}
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataplaneStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DataplaneStatus{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_DataplaneStatus{v}
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EventReport", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &events.EventReport{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_EventReport{v}
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataplaneSoftwareDetails", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DataplaneSoftwareDetails{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_DataplaneSoftwareDetails{v}
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataplaneUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommand
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommand
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DataplaneUpdate{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Command_DataplaneUpdate{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommand(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommand
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes a CommandStatusResponse from the protobuf wire bytes in
+// dAtA. Generated-style decoder: it returns io.ErrUnexpectedEOF on truncated
+// input, ErrIntOverflowCommand on an over-long varint, and
+// ErrInvalidLengthCommand on a negative/overflowing length prefix.
+func (m *CommandStatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommandStatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommandStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: Status (enum, varint-encoded).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			m.Status = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Status |= CommandStatusResponse_CommandStatus(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			// Field 2: ErrorCode (enum, varint-encoded).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ErrorCode", wireType)
+			}
+			m.ErrorCode = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ErrorCode |= CommandStatusResponse_CommandErrorCode(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: Message (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: Error (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes a DataplaneStatus from the protobuf wire bytes in dAtA.
+// Repeated message fields (Details, Healths, DataplaneSoftwareDetails,
+// DataplaneSoftwareHealths, AgentActivityStatus) are appended element by
+// element; unknown fields are preserved in XXX_unrecognized.
+func (m *DataplaneStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DataplaneStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DataplaneStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: SystemId (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SystemId", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SystemId = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: Details (repeated NginxDetails message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Details = append(m.Details, &NginxDetails{})
+			if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			// Field 3: Host (HostInfo message, allocated lazily on first use).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Host == nil {
+				m.Host = &HostInfo{}
+			}
+			if err := m.Host.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			// Field 5: Healths (repeated NginxHealth message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Healths", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Healths = append(m.Healths, &NginxHealth{})
+			if err := m.Healths[len(m.Healths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			// Field 6: DataplaneSoftwareDetails (repeated message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataplaneSoftwareDetails", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataplaneSoftwareDetails = append(m.DataplaneSoftwareDetails, &DataplaneSoftwareDetails{})
+			if err := m.DataplaneSoftwareDetails[len(m.DataplaneSoftwareDetails)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			// Field 7: DataplaneSoftwareHealths (repeated message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataplaneSoftwareHealths", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataplaneSoftwareHealths = append(m.DataplaneSoftwareHealths, &DataplaneSoftwareHealth{})
+			if err := m.DataplaneSoftwareHealths[len(m.DataplaneSoftwareHealths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			// Field 8: AgentActivityStatus (repeated message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AgentActivityStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AgentActivityStatus = append(m.AgentActivityStatus, &AgentActivityStatus{})
+			if err := m.AgentActivityStatus[len(m.AgentActivityStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes an AgentActivityStatus from the protobuf wire bytes in
+// dAtA. Field 1 (NginxConfigStatus) is a oneof member: the decoded message is
+// wrapped in AgentActivityStatus_NginxConfigStatus and stored in m.Status.
+func (m *AgentActivityStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AgentActivityStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AgentActivityStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: NginxConfigStatus (oneof Status).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NginxConfigStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &NginxConfigStatus{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Status = &AgentActivityStatus_NginxConfigStatus{v}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes an NginxConfigStatus from the protobuf wire bytes in
+// dAtA: CorrelationId (1, string), Status (2, enum), Message (3, string),
+// NginxId (4, string). Unknown fields are kept in XXX_unrecognized.
+func (m *NginxConfigStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NginxConfigStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NginxConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: CorrelationId (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CorrelationId", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CorrelationId = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: Status (enum, varint-encoded).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			m.Status = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Status |= NginxConfigStatus_Status(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: Message (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: NginxId (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NginxId", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NginxId = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes a DataplaneSoftwareHealth from the protobuf wire bytes in
+// dAtA. Fields 1 (NginxHealth) and 2 (AppProtectWafHealth) are members of the
+// Health oneof; whichever appears last in the stream wins.
+func (m *DataplaneSoftwareHealth) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DataplaneSoftwareHealth: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DataplaneSoftwareHealth: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: NginxHealth (oneof Health).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NginxHealth", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &NginxHealth{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Health = &DataplaneSoftwareHealth_NginxHealth{v}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: AppProtectWafHealth (oneof Health).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtectWafHealth", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &AppProtectWAFHealth{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Health = &DataplaneSoftwareHealth_AppProtectWafHealth{v}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes a DataplaneUpdate from the protobuf wire bytes in dAtA:
+// Host (1, HostInfo message, lazily allocated) and DataplaneSoftwareDetails
+// (2, repeated message, appended per occurrence).
+func (m *DataplaneUpdate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DataplaneUpdate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DataplaneUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: Host (HostInfo message, allocated lazily on first use).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Host == nil {
+				m.Host = &HostInfo{}
+			}
+			if err := m.Host.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: DataplaneSoftwareDetails (repeated message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataplaneSoftwareDetails", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataplaneSoftwareDetails = append(m.DataplaneSoftwareDetails, &DataplaneSoftwareDetails{})
+			if err := m.DataplaneSoftwareDetails[len(m.DataplaneSoftwareDetails)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes a DownloadRequest from the protobuf wire bytes in dAtA.
+// The only known field is Meta (1, Metadata message, lazily allocated).
+func (m *DownloadRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DownloadRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DownloadRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: Meta (Metadata message, allocated lazily on first use).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Meta == nil {
+				m.Meta = &Metadata{}
+			}
+			if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes an NginxConfigResponse from the protobuf wire bytes in
+// dAtA: Status (1, CommandStatusResponse message), Action (2, enum), and
+// ConfigData (3, ConfigDescriptor message). Nested messages are allocated
+// lazily on first occurrence.
+func (m *NginxConfigResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NginxConfigResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NginxConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: Status (CommandStatusResponse message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Status == nil {
+				m.Status = &CommandStatusResponse{}
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: Action (NginxConfigAction enum, varint-encoded).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= NginxConfigAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: ConfigData (ConfigDescriptor message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigData", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigData == nil {
+				m.ConfigData = &ConfigDescriptor{}
+			}
+			if err := m.ConfigData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes an UploadStatus from the protobuf wire bytes in dAtA:
+// Meta (1, Metadata message, lazily allocated), Status (2, TransferStatus
+// enum), and Reason (3, string). Unknown fields go to XXX_unrecognized.
+func (m *UploadStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key as a varint: (fieldNum << 3) | wireType.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UploadStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UploadStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: Meta (Metadata message, allocated lazily on first use).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Meta == nil {
+				m.Meta = &Metadata{}
+			}
+			if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: Status (TransferStatus enum, varint-encoded).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			m.Status = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Status |= UploadStatus_TransferStatus(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: Reason (length-delimited string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes in XXX_unrecognized.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire format in dAtA into m. Fields 1
+// (Header) and 2 (Data) both populate the Chunk oneof — whichever is decoded
+// last wins. Unknown fields are preserved in m.XXX_unrecognized.
+// Generated by protoc-gen-gogo; the decoding logic must not be hand-edited.
+func (m *DataChunk) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Decode the tag varint: (field number << 3) | wire type.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DataChunk: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DataChunk: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: length-delimited ChunkedResourceHeader stored in the Chunk oneof.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &ChunkedResourceHeader{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Chunk = &DataChunk_Header{v}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: length-delimited ChunkedResourceChunk stored in the Chunk oneof.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &ChunkedResourceChunk{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Chunk = &DataChunk_Data{v}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it while keeping its raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire format in dAtA into m: field 1 Meta
+// (embedded message), field 2 Chunks (varint int32), field 3 Checksum
+// (string), field 4 ChunkSize (varint int32). Unknown fields are preserved
+// in m.XXX_unrecognized. Generated by protoc-gen-gogo; do not hand-edit.
+func (m *ChunkedResourceHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Decode the tag varint: (field number << 3) | wire type.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ChunkedResourceHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ChunkedResourceHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: length-delimited Metadata message, lazily allocated.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Meta == nil {
+				m.Meta = &Metadata{}
+			}
+			if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: varint-encoded chunk count.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType)
+			}
+			m.Chunks = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Chunks |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: length-delimited checksum string.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checksum = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: varint-encoded chunk size.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ChunkSize", wireType)
+			}
+			m.ChunkSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ChunkSize |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			// Unknown field: skip it while keeping its raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire format in dAtA into m: field 1 Meta
+// (embedded message), field 2 ChunkId (varint int32), field 3 Data (bytes,
+// copied out of dAtA). Unknown fields are preserved in m.XXX_unrecognized.
+// Generated by protoc-gen-gogo; do not hand-edit.
+func (m *ChunkedResourceChunk) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Decode the tag varint: (field number << 3) | wire type.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ChunkedResourceChunk: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ChunkedResourceChunk: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: length-delimited Metadata message, lazily allocated.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Meta == nil {
+				m.Meta = &Metadata{}
+			}
+			if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: varint-encoded chunk ID.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ChunkId", wireType)
+			}
+			m.ChunkId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ChunkId |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: length-delimited payload, copied so m.Data does not alias dAtA.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCommand
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it while keeping its raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommand(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommand
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipCommand returns the number of bytes occupied by the wire-format field
+// starting at dAtA[0] (tag included), so callers can skip unknown fields while
+// preserving their raw bytes. It validates varints, length prefixes, and
+// (deprecated) group nesting. Generated by protoc-gen-gogo; do not hand-edit.
+func skipCommand(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		// Decode the tag varint to learn the wire type of the next value.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCommand
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint: scan past its continuation bytes.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			// Fixed 64-bit value.
+			iNdEx += 8
+		case 2:
+			// Length-delimited: read the length varint, then skip that many bytes.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommand
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthCommand
+			}
+			iNdEx += length
+		case 3:
+			// Start of a group: track nesting depth.
+			depth++
+		case 4:
+			// End of a group must match a prior start-group tag.
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupCommand
+			}
+			depth--
+		case 5:
+			// Fixed 32-bit value.
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthCommand
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+// Sentinel errors shared by all generated Unmarshal/skip helpers in this file.
+var (
+	// ErrInvalidLengthCommand reports a negative or out-of-range length prefix.
+	ErrInvalidLengthCommand        = fmt.Errorf("proto: negative length found during unmarshaling")
+	// ErrIntOverflowCommand reports a varint wider than 64 bits.
+	ErrIntOverflowCommand          = fmt.Errorf("proto: integer overflow")
+	// ErrUnexpectedEndOfGroupCommand reports an end-group tag with no matching start.
+	ErrUnexpectedEndOfGroupCommand = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.proto
new file mode 100644
index 000000000..dcdc69782
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command.proto
@@ -0,0 +1,232 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "agent.proto";
+import "common.proto";
+import "config.proto";
+import "dp_software_details.proto";
+import "events/event.proto";
+import "gogo.proto";
+import "host.proto";
+import "nap.proto";
+import "nginx.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents a command message, which is used for communication between the management server and the agent.
+message Command {
+  // Provides metadata information associated with the command
+  Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+  // Command type enum
+  enum CommandType {
+    // All commands default to normal
+    NORMAL = 0;
+    // The download type is used when sending NginxConfig from the management server to the agent.
+    // It is used to instruct the agent to download the NGINX config from the management server.
+    DOWNLOAD = 1;
+    // The upload type is used when sending NginxConfig from the agent to the management server.
+    // It is used to instruct the agent to upload the NGINX config from the agent.
+    // This will be implemented in a future release.
+    UPLOAD = 2;
+  }
+  // Used to determine the type of command
+  CommandType type = 2 [(gogoproto.jsontag) = "type"];
+
+  oneof data {
+    // Common command status response
+    CommandStatusResponse cmd_status = 3 [(gogoproto.jsontag) = "cmd_status"];
+
+    // Used by the management server to notify the agent to download or upload NGINX configuration.
+    NginxConfig nginx_config = 4 [(gogoproto.jsontag) = "nginx_config"];
+
+    // Response sent to indicate if a NGINX config apply was successful or not
+    NginxConfigResponse nginx_config_response = 5 [(gogoproto.jsontag) = "nginx_config_response"];
+
+    // Agent connect request that is sent from the agent to the management server to initialize registration
+    AgentConnectRequest agent_connect_request = 6 [(gogoproto.jsontag) = "agent_connect_request"];
+
+    // Agent connect response that is sent from the management server to the agent to finalize registration
+    AgentConnectResponse agent_connect_response = 7 [(gogoproto.jsontag) = "agent_connect_response"];
+
+    // Agent config request that is sent by the agent to the management server to request agent configuration
+    AgentConfigRequest agent_config_request = 8 [(gogoproto.jsontag) = "agent_config_request"];
+
+    // Agent Config is sent by the management server to the agent when it receives an AgentConfigRequest from the agent
+    AgentConfig agent_config = 9 [(gogoproto.jsontag) = "agent_config"];
+
+    // Dataplane status is sent by the agent to the management server to report the information like the health of the system
+    DataplaneStatus dataplane_status = 11 [(gogoproto.jsontag) = "dataplane_status"];
+
+    // Reports events the agent is aware of like the start/stop of the agent, NGINX config applies, etc.
+    f5.nginx.agent.sdk.events.EventReport event_report = 12 [(gogoproto.jsontag) = "event_report"];
+
+    // Provides details of additional software running on the dataplane
+    DataplaneSoftwareDetails dataplane_software_details = 13 [(gogoproto.jsontag) = "dataplane_software_details"];
+
+    // Provides details of any changes on the dataplane
+    DataplaneUpdate dataplane_update = 14 [(gogoproto.jsontag) = "dataplane_update"];
+  }
+}
+
+// Represents a command status response
+message CommandStatusResponse {
+  // Command status enum
+  enum CommandStatus {
+    // Unknown status of command
+    CMD_UNKNOWN = 0;
+    // Command was successful
+    CMD_OK = 1;
+    // Command failed
+    CMD_ERROR = 2;
+  }
+
+  // Command error code enum
+  enum CommandErrorCode {
+    // No Error (This is the default value)
+    ERR_OK = 0;
+    // Unknown error
+    ERR_UNKNOWN = 1;
+  }
+
+  // Command status
+  CommandStatus status = 1 [(gogoproto.jsontag) = "status"];
+  // Error code
+  CommandErrorCode error_code = 2 [(gogoproto.jsontag) = "error_code"];
+  // Provides a user friendly message to describe the response
+  string message = 3 [(gogoproto.jsontag) = "message"];
+  // Provides an error message of why the command failed
+  string error = 4 [(gogoproto.jsontag) = "error"];
+}
+
+// Represents a dataplane status, which is used by the agent to periodically report the status of NGINX, agent activities and other dataplane software activities.
+message DataplaneStatus {
+  // System ID
+  string system_id = 1 [(gogoproto.jsontag) = "system_id"];
+  // List of NGINX details. This field will be moving to DataplaneSoftwareDetails in a future release.
+  repeated NginxDetails details = 2 [(gogoproto.jsontag) = "details"];
+  // Host information
+  HostInfo host = 3 [(gogoproto.jsontag) = "host"];
+  // List of NGINX health information. This field will be moving to DataplaneSoftwareHealth in a future release.
+  repeated NginxHealth healths = 5 [(gogoproto.jsontag) = "healths"];
+  // List of software details. This includes details about NGINX and any other software installed in the system that the agent is interested in.
+  repeated DataplaneSoftwareDetails dataplane_software_details = 6 [(gogoproto.jsontag) = "dataplane_software_details"];
+  // List of software health statuses. This includes the health of NGINX and any other software installed in the system that the agent is interested in.
+  repeated DataplaneSoftwareHealth dataplane_software_healths = 7 [(gogoproto.jsontag) = "dataplane_software_healths"];
+  // List of activity statuses. Reports on the status of activities that the agent is currently executing.
+  repeated AgentActivityStatus agent_activity_status = 8 [(gogoproto.jsontag) = "agent_activity_status"];
+}
+
+// Represent an agent activity status
+message AgentActivityStatus {
+  oneof Status {
+    // NGINX configuration status
+    NginxConfigStatus nginx_config_status = 1 [(gogoproto.jsontag) = "nginx_config_status"];
+  }
+}
+
+// Represents a NGINX configuration status
+message NginxConfigStatus {
+  // CorrelationID is an ID used by the producer of the message to track the flow of events
+  string correlation_id = 1 [(gogoproto.jsontag) = "correlation_id"];
+  // Provides a status for the NGINX configuration
+  Status status = 2 [(gogoproto.jsontag) = "status"];
+  // Provides a user friendly message to describe the current state of the NGINX configuration.
+  string message = 3 [(gogoproto.jsontag) = "message"];
+  // NGINX ID
+  string nginx_id = 4 [(gogoproto.jsontag) = "nginx_id"];
+
+  // NGINX configuration status enum
+  enum Status {
+    // The configuration is still in the process of being applied.
+    PENDING = 0;
+    // The configuration has been successfully applied.
+    OK = 1;
+    // The configuration has failed to be applied
+    ERROR = 2;
+  }
+}
+
+// Represents a dataplane software health
+message DataplaneSoftwareHealth {
+  oneof health {
+    // Health of NGINX instance
+    NginxHealth nginx_health = 1 [(gogoproto.jsontag) = "nginx_health"];
+    // Health of App Protect WAF
+    AppProtectWAFHealth app_protect_waf_health = 2 [(gogoproto.jsontag) = "app_protect_waf_health"];
+  }
+}
+
+// Represents a dataplane update
+message DataplaneUpdate {
+  // Host information
+  HostInfo host = 1 [(gogoproto.jsontag) = "host"];
+  // List of software details. This includes details about NGINX and any other software installed in the system that the agent is interested in.
+  repeated DataplaneSoftwareDetails dataplane_software_details = 2 [(gogoproto.jsontag) = "dataplane_software_details"];
+}
+
+// Represents a download request
+message DownloadRequest {
+  // Metadata information
+  Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+}
+
+// Represents a NGINX config response
+message NginxConfigResponse {
+  // Command status
+  CommandStatusResponse status = 1 [(gogoproto.jsontag) = "status"];
+  // NGINX config action
+  NginxConfigAction action = 2 [(gogoproto.jsontag) = "action"];
+  // NGINX config description
+  ConfigDescriptor config_data = 3 [(gogoproto.jsontag) = "config_data"];
+}
+
+// Represents an upload status
+message UploadStatus {
+  // Transfer status enum
+  enum TransferStatus {
+    // Unknown status
+    UNKNOWN = 0;
+    // Upload was successful
+    OK = 1;
+    // Upload failed
+    FAILED = 2;
+  }
+  // Metadata information
+  Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+  // Transfer status
+  TransferStatus status = 2 [(gogoproto.jsontag) = "status"];
+  // Provides an error message of why the upload failed
+  string reason = 3 [(gogoproto.jsontag) = "reason"];
+}
+
+// Represents a data chunk
+message DataChunk {
+  oneof chunk {
+    // Chunk header
+    ChunkedResourceHeader header = 1 [(gogoproto.jsontag) = "header"];
+    // Chunk data
+    ChunkedResourceChunk data = 2 [(gogoproto.jsontag) = "data"];
+  }
+}
+
+// Represents a chunked resource header
+message ChunkedResourceHeader {
+  // Metadata information
+  Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+  // Number of chunks expected in the transfer
+  int32 chunks = 2 [(gogoproto.jsontag) = "chunks"];
+  // Chunk checksum
+  string checksum = 3 [(gogoproto.jsontag) = "checksum"];
+  // Chunk size
+  int32 chunk_size = 4 [(gogoproto.jsontag) = "chunk_size"];
+}
+
+// Represents a chunked resource chunk
+message ChunkedResourceChunk {
+  // Metadata information
+  Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+  // Chunk ID
+  int32 chunk_id = 2 [(gogoproto.jsontag) = "chunk_id"];
+  // Chunk data
+  bytes data = 3 [(gogoproto.jsontag) = "data"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.pb.go
new file mode 100644
index 000000000..05ac4ec44
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.pb.go
@@ -0,0 +1,303 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: command_svc.proto
+
+package proto
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// init registers the compressed file descriptor with the gogo proto registry.
+func init() { proto.RegisterFile("command_svc.proto", fileDescriptor_8c55e2f87354005f) }
+
+// fileDescriptor_8c55e2f87354005f is the gzipped FileDescriptorProto for
+// command_svc.proto, emitted verbatim by protoc-gen-gogo.
+var fileDescriptor_8c55e2f87354005f = []byte{
+	// 226 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xce, 0xcf, 0xcd,
+	0x4d, 0xcc, 0x4b, 0x89, 0x2f, 0x2e, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a,
+	0x33, 0xd5, 0xcb, 0x4b, 0xcf, 0xcc, 0xab, 0xd0, 0x4b, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x2b, 0x4e,
+	0xc9, 0x96, 0xe2, 0x85, 0x2a, 0x83, 0x28, 0x31, 0x6a, 0x60, 0xe2, 0xe2, 0x74, 0x86, 0x88, 0xa4,
+	0x16, 0x09, 0x05, 0x70, 0xf1, 0x41, 0x39, 0xce, 0x19, 0x89, 0x79, 0x79, 0xa9, 0x39, 0x42, 0xd2,
+	0x7a, 0x98, 0x66, 0xe8, 0x41, 0xd5, 0x48, 0xe1, 0x93, 0x54, 0x62, 0xd0, 0x60, 0x34, 0x60, 0x14,
+	0x0a, 0xe2, 0xe2, 0x70, 0xc9, 0x2f, 0xcf, 0xcb, 0xc9, 0x4f, 0x4c, 0x11, 0x52, 0xc6, 0xa6, 0x1c,
+	0x26, 0x1b, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, 0x8b, 0x55, 0x51, 0x62, 0x49, 0xa2,
+	0x73, 0x46, 0x69, 0x5e, 0xb6, 0x12, 0x83, 0x01, 0xa3, 0x90, 0x2f, 0x17, 0x5b, 0x68, 0x01, 0xd8,
+	0x44, 0xfc, 0x8a, 0xa5, 0x14, 0xb0, 0x49, 0x43, 0xb4, 0x06, 0x97, 0x24, 0x96, 0x94, 0x16, 0x83,
+	0x1c, 0xe9, 0x64, 0x7e, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
+	0x46, 0x69, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x83, 0xb5, 0xea,
+	0x83, 0xb5, 0xea, 0x17, 0xa7, 0x64, 0xeb, 0x97, 0x19, 0xe9, 0x83, 0x43, 0xcd, 0x1a, 0x4c, 0x26,
+	0xb1, 0x81, 0x29, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x3c, 0x58, 0x98, 0x7a, 0x01,
+	0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// CommanderClient is the client API for Commander service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type CommanderClient interface {
+	// A Bidirectional streaming RPC established by the agent and is kept open
+	CommandChannel(ctx context.Context, opts ...grpc.CallOption) (Commander_CommandChannelClient, error)
+	// A streaming RPC established by the agent and is used to download resources associated with commands
+	// The download stream will be kept open for the duration of the data transfer and will be closed when it's done.
+	// The transfer is a stream of chunks as follows: header -> data chunk 1 -> data chunk N.
+	// Each data chunk is of a size smaller than the maximum gRPC payload
+	Download(ctx context.Context, in *DownloadRequest, opts ...grpc.CallOption) (Commander_DownloadClient, error)
+	// A streaming RPC established by the agent and is used to upload resources associated with commands
+	Upload(ctx context.Context, opts ...grpc.CallOption) (Commander_UploadClient, error)
+}
+
+// commanderClient is the concrete CommanderClient backed by a gRPC connection.
+type commanderClient struct {
+	cc *grpc.ClientConn
+}
+
+// NewCommanderClient returns a CommanderClient that issues RPCs over cc.
+func NewCommanderClient(cc *grpc.ClientConn) CommanderClient {
+	return &commanderClient{cc}
+}
+
+// CommandChannel opens the bidirectional Command stream (Streams[0] of the
+// service descriptor); Command messages are exchanged via the returned stream.
+func (c *commanderClient) CommandChannel(ctx context.Context, opts ...grpc.CallOption) (Commander_CommandChannelClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Commander_serviceDesc.Streams[0], "/f5.nginx.agent.sdk.Commander/CommandChannel", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &commanderCommandChannelClient{stream}
+	return x, nil
+}
+
+// Commander_CommandChannelClient is the client view of the CommandChannel stream.
+type Commander_CommandChannelClient interface {
+	Send(*Command) error
+	Recv() (*Command, error)
+	grpc.ClientStream
+}
+
+type commanderCommandChannelClient struct {
+	grpc.ClientStream
+}
+
+func (x *commanderCommandChannelClient) Send(m *Command) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *commanderCommandChannelClient) Recv() (*Command, error) {
+	m := new(Command)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Download sends the single DownloadRequest, half-closes the send side, and
+// returns the server stream from which DataChunk messages are received.
+func (c *commanderClient) Download(ctx context.Context, in *DownloadRequest, opts ...grpc.CallOption) (Commander_DownloadClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Commander_serviceDesc.Streams[1], "/f5.nginx.agent.sdk.Commander/Download", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &commanderDownloadClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+// Commander_DownloadClient is the client view of the Download stream.
+type Commander_DownloadClient interface {
+	Recv() (*DataChunk, error)
+	grpc.ClientStream
+}
+
+type commanderDownloadClient struct {
+	grpc.ClientStream
+}
+
+func (x *commanderDownloadClient) Recv() (*DataChunk, error) {
+	m := new(DataChunk)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Upload opens the client stream; send DataChunk messages with Send and
+// finish with CloseAndRecv to obtain the final UploadStatus.
+func (c *commanderClient) Upload(ctx context.Context, opts ...grpc.CallOption) (Commander_UploadClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Commander_serviceDesc.Streams[2], "/f5.nginx.agent.sdk.Commander/Upload", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &commanderUploadClient{stream}
+	return x, nil
+}
+
+// Commander_UploadClient is the client view of the Upload stream.
+type Commander_UploadClient interface {
+	Send(*DataChunk) error
+	CloseAndRecv() (*UploadStatus, error)
+	grpc.ClientStream
+}
+
+type commanderUploadClient struct {
+	grpc.ClientStream
+}
+
+func (x *commanderUploadClient) Send(m *DataChunk) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+// CloseAndRecv half-closes the send side and waits for the server's single
+// UploadStatus response.
+func (x *commanderUploadClient) CloseAndRecv() (*UploadStatus, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(UploadStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// CommanderServer is the server API for Commander service.
+type CommanderServer interface {
+	// A Bidirectional streaming RPC established by the agent and is kept open
+	CommandChannel(Commander_CommandChannelServer) error
+	// A streaming RPC established by the agent and is used to download resources associated with commands
+	// The download stream will be kept open for the duration of the data transfer and will be closed when it's done.
+	// The transfer is a stream of chunks as follows: header -> data chunk 1 -> data chunk N.
+	// Each data chunk is of a size smaller than the maximum gRPC payload
+	Download(*DownloadRequest, Commander_DownloadServer) error
+	// A streaming RPC established by the agent and is used to upload resources associated with commands
+	Upload(Commander_UploadServer) error
+}
+
+// UnimplementedCommanderServer can be embedded to have forward compatible implementations.
+type UnimplementedCommanderServer struct {
+}
+
+func (*UnimplementedCommanderServer) CommandChannel(srv Commander_CommandChannelServer) error {
+	return status.Errorf(codes.Unimplemented, "method CommandChannel not implemented")
+}
+func (*UnimplementedCommanderServer) Download(req *DownloadRequest, srv Commander_DownloadServer) error {
+	return status.Errorf(codes.Unimplemented, "method Download not implemented")
+}
+func (*UnimplementedCommanderServer) Upload(srv Commander_UploadServer) error {
+	return status.Errorf(codes.Unimplemented, "method Upload not implemented")
+}
+
+// RegisterCommanderServer attaches srv's handlers to the gRPC server s.
+func RegisterCommanderServer(s *grpc.Server, srv CommanderServer) {
+	s.RegisterService(&_Commander_serviceDesc, srv)
+}
+
+// _Commander_CommandChannel_Handler adapts the raw gRPC stream to the
+// CommanderServer.CommandChannel method.
+func _Commander_CommandChannel_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(CommanderServer).CommandChannel(&commanderCommandChannelServer{stream})
+}
+
+// Commander_CommandChannelServer is the server view of the CommandChannel stream.
+type Commander_CommandChannelServer interface {
+	Send(*Command) error
+	Recv() (*Command, error)
+	grpc.ServerStream
+}
+
+type commanderCommandChannelServer struct {
+	grpc.ServerStream
+}
+
+func (x *commanderCommandChannelServer) Send(m *Command) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *commanderCommandChannelServer) Recv() (*Command, error) {
+	m := new(Command)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// _Commander_Download_Handler decodes the single DownloadRequest and hands the
+// server-streaming side to CommanderServer.Download.
+func _Commander_Download_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(DownloadRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(CommanderServer).Download(m, &commanderDownloadServer{stream})
+}
+
+// Commander_DownloadServer is the server view of the Download stream.
+type Commander_DownloadServer interface {
+	Send(*DataChunk) error
+	grpc.ServerStream
+}
+
+type commanderDownloadServer struct {
+	grpc.ServerStream
+}
+
+func (x *commanderDownloadServer) Send(m *DataChunk) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// _Commander_Upload_Handler adapts the raw gRPC stream to CommanderServer.Upload.
+func _Commander_Upload_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(CommanderServer).Upload(&commanderUploadServer{stream})
+}
+
+// Commander_UploadServer is the server view of the Upload stream; the server
+// ends the RPC with SendAndClose.
+type Commander_UploadServer interface {
+	SendAndClose(*UploadStatus) error
+	Recv() (*DataChunk, error)
+	grpc.ServerStream
+}
+
+type commanderUploadServer struct {
+	grpc.ServerStream
+}
+
+func (x *commanderUploadServer) SendAndClose(m *UploadStatus) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *commanderUploadServer) Recv() (*DataChunk, error) {
+	m := new(DataChunk)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// _Commander_serviceDesc wires the Commander service's three streaming RPCs to
+// their handlers; the service has no unary methods.
+var _Commander_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "f5.nginx.agent.sdk.Commander",
+	HandlerType: (*CommanderServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "CommandChannel",
+			Handler:       _Commander_CommandChannel_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+		{
+			StreamName:    "Download",
+			Handler:       _Commander_Download_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Upload",
+			Handler:       _Commander_Upload_Handler,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "command_svc.proto",
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.proto
new file mode 100644
index 000000000..d3addcb55
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/command_svc.proto
@@ -0,0 +1,21 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "command.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents a service used to send command messages between the management server and the agent.
+service Commander {
+ // A Bidirectional streaming RPC established by the agent and is kept open
+ rpc CommandChannel(stream Command) returns (stream Command) {}
+
+ // A streaming RPC established by the agent and is used to download resources associated with commands
+  // The download stream will be kept open for the duration of the data transfer and will be closed when it is done.
+ // The transfer is a stream of chunks as follows: header -> data chunk 1 -> data chunk N.
+ // Each data chunk is of a size smaller than the maximum gRPC payload
+ rpc Download(DownloadRequest) returns (stream DataChunk) {}
+
+ // A streaming RPC established by the agent and is used to upload resources associated with commands
+ rpc Upload(stream DataChunk) returns (UploadStatus) {}
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.pb.go
new file mode 100644
index 000000000..398267a49
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.pb.go
@@ -0,0 +1,3772 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: common.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Action enum
+type File_Action int32
+
+const (
+ // Default value
+ File_unset File_Action = 0
+ // No changes to the file
+ File_unchanged File_Action = 1
+ // New file
+ File_add File_Action = 2
+ // Updated file
+ File_update File_Action = 3
+ // File deleted
+ File_delete File_Action = 4
+)
+
+var File_Action_name = map[int32]string{
+ 0: "unset",
+ 1: "unchanged",
+ 2: "add",
+ 3: "update",
+ 4: "delete",
+}
+
+var File_Action_value = map[string]int32{
+ "unset": 0,
+ "unchanged": 1,
+ "add": 2,
+ "update": 3,
+ "delete": 4,
+}
+
+func (x File_Action) String() string {
+ return proto.EnumName(File_Action_name, int32(x))
+}
+
+func (File_Action) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{2, 0}
+}
+
+// Represents the metadata for a message
+type Metadata struct {
+ // timestamp defines the time of message creation
+ Timestamp *types.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp"`
+ // Client ID
+ ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id"`
+ // Message ID
+ MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id"`
+ // Cloud Account ID (e.g. AWS/Azure/GCP account ID)
+ CloudAccountId string `protobuf:"bytes,4,opt,name=cloud_account_id,json=cloudAccountId,proto3" json:"cloud_account_id"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{0}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *Metadata) GetTimestamp() *types.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *Metadata) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *Metadata) GetMessageId() string {
+ if m != nil {
+ return m.MessageId
+ }
+ return ""
+}
+
+func (m *Metadata) GetCloudAccountId() string {
+ if m != nil {
+ return m.CloudAccountId
+ }
+ return ""
+}
+
+// Represents a map of directories & files on the system
+type DirectoryMap struct {
+ // List of directories
+ Directories []*Directory `protobuf:"bytes,1,rep,name=directories,proto3" json:"directories,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DirectoryMap) Reset() { *m = DirectoryMap{} }
+func (m *DirectoryMap) String() string { return proto.CompactTextString(m) }
+func (*DirectoryMap) ProtoMessage() {}
+func (*DirectoryMap) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{1}
+}
+func (m *DirectoryMap) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DirectoryMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DirectoryMap.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DirectoryMap) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DirectoryMap.Merge(m, src)
+}
+func (m *DirectoryMap) XXX_Size() int {
+ return m.Size()
+}
+func (m *DirectoryMap) XXX_DiscardUnknown() {
+ xxx_messageInfo_DirectoryMap.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DirectoryMap proto.InternalMessageInfo
+
+func (m *DirectoryMap) GetDirectories() []*Directory {
+ if m != nil {
+ return m.Directories
+ }
+ return nil
+}
+
+// Represents a file
+type File struct {
+ // Name of the file
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Number of lines in the file
+ Lines int32 `protobuf:"varint,2,opt,name=lines,proto3" json:"lines"`
+ // When the file was last modified
+ Mtime *types.Timestamp `protobuf:"bytes,3,opt,name=mtime,proto3" json:"mtime"`
+ // File permissions (e.g. 0644)
+ Permissions string `protobuf:"bytes,4,opt,name=permissions,proto3" json:"permissions"`
+ // Size of the file in bytes
+ Size_ int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size"`
+ // The contents of the file in bytes
+ Contents []byte `protobuf:"bytes,6,opt,name=contents,proto3" json:"contents"`
+ // Action to take on the file (e.g. update, delete, etc)
+ Action File_Action `protobuf:"varint,7,opt,name=action,proto3,enum=f5.nginx.agent.sdk.File_Action" json:"action,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *File) Reset() { *m = File{} }
+func (m *File) String() string { return proto.CompactTextString(m) }
+func (*File) ProtoMessage() {}
+func (*File) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{2}
+}
+func (m *File) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_File.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *File) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_File.Merge(m, src)
+}
+func (m *File) XXX_Size() int {
+ return m.Size()
+}
+func (m *File) XXX_DiscardUnknown() {
+ xxx_messageInfo_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_File proto.InternalMessageInfo
+
+func (m *File) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *File) GetLines() int32 {
+ if m != nil {
+ return m.Lines
+ }
+ return 0
+}
+
+func (m *File) GetMtime() *types.Timestamp {
+ if m != nil {
+ return m.Mtime
+ }
+ return nil
+}
+
+func (m *File) GetPermissions() string {
+ if m != nil {
+ return m.Permissions
+ }
+ return ""
+}
+
+func (m *File) GetSize_() int64 {
+ if m != nil {
+ return m.Size_
+ }
+ return 0
+}
+
+func (m *File) GetContents() []byte {
+ if m != nil {
+ return m.Contents
+ }
+ return nil
+}
+
+func (m *File) GetAction() File_Action {
+ if m != nil {
+ return m.Action
+ }
+ return File_unset
+}
+
+// Represents a directory
+type Directory struct {
+ // Name of the directory
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // When the directory was last modified
+ Mtime *types.Timestamp `protobuf:"bytes,2,opt,name=mtime,proto3" json:"mtime,omitempty"`
+ // Directory permissions (e.g. 0644)
+ Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"`
+ // Size of the directory in bytes
+ Size_ int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ // List of files in the directory
+ Files []*File `protobuf:"bytes,5,rep,name=files,proto3" json:"files,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Directory) Reset() { *m = Directory{} }
+func (m *Directory) String() string { return proto.CompactTextString(m) }
+func (*Directory) ProtoMessage() {}
+func (*Directory) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{3}
+}
+func (m *Directory) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Directory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Directory.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Directory) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Directory.Merge(m, src)
+}
+func (m *Directory) XXX_Size() int {
+ return m.Size()
+}
+func (m *Directory) XXX_DiscardUnknown() {
+ xxx_messageInfo_Directory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Directory proto.InternalMessageInfo
+
+func (m *Directory) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Directory) GetMtime() *types.Timestamp {
+ if m != nil {
+ return m.Mtime
+ }
+ return nil
+}
+
+func (m *Directory) GetPermissions() string {
+ if m != nil {
+ return m.Permissions
+ }
+ return ""
+}
+
+func (m *Directory) GetSize_() int64 {
+ if m != nil {
+ return m.Size_
+ }
+ return 0
+}
+
+func (m *Directory) GetFiles() []*File {
+ if m != nil {
+ return m.Files
+ }
+ return nil
+}
+
+// Represents a list of SSL certificates files
+type SslCertificates struct {
+ // List of SSL certificates
+ SslCerts []*SslCertificate `protobuf:"bytes,1,rep,name=ssl_certs,json=sslCerts,proto3" json:"ssl_certs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SslCertificates) Reset() { *m = SslCertificates{} }
+func (m *SslCertificates) String() string { return proto.CompactTextString(m) }
+func (*SslCertificates) ProtoMessage() {}
+func (*SslCertificates) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{4}
+}
+func (m *SslCertificates) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SslCertificates) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SslCertificates.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SslCertificates) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SslCertificates.Merge(m, src)
+}
+func (m *SslCertificates) XXX_Size() int {
+ return m.Size()
+}
+func (m *SslCertificates) XXX_DiscardUnknown() {
+ xxx_messageInfo_SslCertificates.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SslCertificates proto.InternalMessageInfo
+
+func (m *SslCertificates) GetSslCerts() []*SslCertificate {
+ if m != nil {
+ return m.SslCerts
+ }
+ return nil
+}
+
+// Represents a SSL certificate file
+type SslCertificate struct {
+ // Name of the file
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"fileName"`
+ // Size of the file in bytes
+ Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size"`
+ // When the file was last modified
+ Mtime *types.Timestamp `protobuf:"bytes,3,opt,name=mtime,proto3" json:"mtime"`
+ // A time when the certificate is valid
+ Validity *CertificateDates `protobuf:"bytes,4,opt,name=validity,proto3" json:"validity"`
+ // This field contains the distinguished name (DN) of the certificate issuer
+ Issuer *CertificateName `protobuf:"bytes,5,opt,name=issuer,proto3" json:"issuer"`
+ // This dedicated object name associated with the public key, for which the certificate is issued
+ Subject *CertificateName `protobuf:"bytes,6,opt,name=subject,proto3" json:"subject"`
+ // Subject alternative names that allows users to specify additional host names for the SSL certificate
+ SubjAltNames []string `protobuf:"bytes,7,rep,name=subj_alt_names,json=subjAltNames,proto3" json:"subjectAltName"`
+ // Online Certificate Status Protocol URL
+ OcspUrl []string `protobuf:"bytes,8,rep,name=ocsp_url,json=ocspUrl,proto3" json:"ocspURL"`
+ // Public key encryption algorithm (e.g. RSA)
+ PublicKeyAlgorithm string `protobuf:"bytes,9,opt,name=public_key_algorithm,json=publicKeyAlgorithm,proto3" json:"publicKeyAlgorithm"`
+ // The signature algorithm contain a hashing algorithm and an encryption algorithm (e.g. sha256RSA where sha256 is the hashing algorithm and RSA is the encryption algorithm)
+ SignatureAlgorithm string `protobuf:"bytes,10,opt,name=signature_algorithm,json=signatureAlgorithm,proto3" json:"signatureAlgorithm"`
+ // Used to uniquely identify the certificate within a CA's systems
+ SerialNumber string `protobuf:"bytes,11,opt,name=serial_number,json=serialNumber,proto3" json:"serialNumber"`
+ // The subject key identifier extension provides a means of identifying certificates that contain a particular public key
+ SubjectKeyIdentifier string `protobuf:"bytes,12,opt,name=subject_key_identifier,json=subjectKeyIdentifier,proto3" json:"subjectKeyIdentifier"`
+ // SSL certificate fingerprint
+ Fingerprint string `protobuf:"bytes,13,opt,name=fingerprint,proto3" json:"fingerprint"`
+ // SSL certificate fingerprint algorithm
+ FingerprintAlgorithm string `protobuf:"bytes,14,opt,name=fingerprint_algorithm,json=fingerprintAlgorithm,proto3" json:"fingerprintAlgorithm"`
+ // There are three versions of certificates: 1, 2 and 3, numbered as 0, 1 and 2. Version 1 supports only the basic fields; Version 2 adds unique identifiers, which represent two additional fields; Version 3 adds extensions.
+ Version int64 `protobuf:"varint,15,opt,name=version,proto3" json:"version"`
+ // The authority key identifier extension provides a means of identifying the Public Key corresponding to the Private Key used to sign a certificate
+ AuthorityKeyIdentifier string `protobuf:"bytes,16,opt,name=authority_key_identifier,json=authorityKeyIdentifier,proto3" json:"authorityKeyIdentifier"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SslCertificate) Reset() { *m = SslCertificate{} }
+func (m *SslCertificate) String() string { return proto.CompactTextString(m) }
+func (*SslCertificate) ProtoMessage() {}
+func (*SslCertificate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{5}
+}
+func (m *SslCertificate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SslCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SslCertificate.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SslCertificate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SslCertificate.Merge(m, src)
+}
+func (m *SslCertificate) XXX_Size() int {
+ return m.Size()
+}
+func (m *SslCertificate) XXX_DiscardUnknown() {
+ xxx_messageInfo_SslCertificate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SslCertificate proto.InternalMessageInfo
+
+func (m *SslCertificate) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetSize_() int64 {
+ if m != nil {
+ return m.Size_
+ }
+ return 0
+}
+
+func (m *SslCertificate) GetMtime() *types.Timestamp {
+ if m != nil {
+ return m.Mtime
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetValidity() *CertificateDates {
+ if m != nil {
+ return m.Validity
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetIssuer() *CertificateName {
+ if m != nil {
+ return m.Issuer
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetSubject() *CertificateName {
+ if m != nil {
+ return m.Subject
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetSubjAltNames() []string {
+ if m != nil {
+ return m.SubjAltNames
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetOcspUrl() []string {
+ if m != nil {
+ return m.OcspUrl
+ }
+ return nil
+}
+
+func (m *SslCertificate) GetPublicKeyAlgorithm() string {
+ if m != nil {
+ return m.PublicKeyAlgorithm
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetSignatureAlgorithm() string {
+ if m != nil {
+ return m.SignatureAlgorithm
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetSerialNumber() string {
+ if m != nil {
+ return m.SerialNumber
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetSubjectKeyIdentifier() string {
+ if m != nil {
+ return m.SubjectKeyIdentifier
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetFingerprint() string {
+ if m != nil {
+ return m.Fingerprint
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetFingerprintAlgorithm() string {
+ if m != nil {
+ return m.FingerprintAlgorithm
+ }
+ return ""
+}
+
+func (m *SslCertificate) GetVersion() int64 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *SslCertificate) GetAuthorityKeyIdentifier() string {
+ if m != nil {
+ return m.AuthorityKeyIdentifier
+ }
+ return ""
+}
+
+// Represents the dates for which a certificate is valid
+type CertificateDates struct {
+ // The start date that for when the certificate is valid
+ NotBefore int64 `protobuf:"varint,1,opt,name=not_before,json=notBefore,proto3" json:"notBefore"`
+ // The end date that for when the certificate is valid
+ NotAfter int64 `protobuf:"varint,2,opt,name=not_after,json=notAfter,proto3" json:"notAfter"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CertificateDates) Reset() { *m = CertificateDates{} }
+func (m *CertificateDates) String() string { return proto.CompactTextString(m) }
+func (*CertificateDates) ProtoMessage() {}
+func (*CertificateDates) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{6}
+}
+func (m *CertificateDates) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CertificateDates) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CertificateDates.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CertificateDates) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CertificateDates.Merge(m, src)
+}
+func (m *CertificateDates) XXX_Size() int {
+ return m.Size()
+}
+func (m *CertificateDates) XXX_DiscardUnknown() {
+ xxx_messageInfo_CertificateDates.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CertificateDates proto.InternalMessageInfo
+
+func (m *CertificateDates) GetNotBefore() int64 {
+ if m != nil {
+ return m.NotBefore
+ }
+ return 0
+}
+
+func (m *CertificateDates) GetNotAfter() int64 {
+ if m != nil {
+ return m.NotAfter
+ }
+ return 0
+}
+
+// Represents a Distinguished Name (DN)
+type CertificateName struct {
+ // The fully qualified domain name (e.g. www.example.com)
+ CommonName string `protobuf:"bytes,1,opt,name=common_name,json=commonName,proto3" json:"commonName"`
+ // Country
+ Country []string `protobuf:"bytes,2,rep,name=country,proto3" json:"country"`
+ // State
+ State []string `protobuf:"bytes,3,rep,name=state,proto3" json:"state"`
+ // Locality
+ Locality []string `protobuf:"bytes,4,rep,name=locality,proto3" json:"locality"`
+ // Organization
+ Organization []string `protobuf:"bytes,5,rep,name=organization,proto3" json:"organization"`
+ // Organizational Unit
+ OrganizationalUnit []string `protobuf:"bytes,6,rep,name=organizational_unit,json=organizationalUnit,proto3" json:"organizationalUnit"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CertificateName) Reset() { *m = CertificateName{} }
+func (m *CertificateName) String() string { return proto.CompactTextString(m) }
+func (*CertificateName) ProtoMessage() {}
+func (*CertificateName) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{7}
+}
+func (m *CertificateName) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CertificateName) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CertificateName.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CertificateName) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CertificateName.Merge(m, src)
+}
+func (m *CertificateName) XXX_Size() int {
+ return m.Size()
+}
+func (m *CertificateName) XXX_DiscardUnknown() {
+ xxx_messageInfo_CertificateName.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CertificateName proto.InternalMessageInfo
+
+func (m *CertificateName) GetCommonName() string {
+ if m != nil {
+ return m.CommonName
+ }
+ return ""
+}
+
+func (m *CertificateName) GetCountry() []string {
+ if m != nil {
+ return m.Country
+ }
+ return nil
+}
+
+func (m *CertificateName) GetState() []string {
+ if m != nil {
+ return m.State
+ }
+ return nil
+}
+
+func (m *CertificateName) GetLocality() []string {
+ if m != nil {
+ return m.Locality
+ }
+ return nil
+}
+
+func (m *CertificateName) GetOrganization() []string {
+ if m != nil {
+ return m.Organization
+ }
+ return nil
+}
+
+func (m *CertificateName) GetOrganizationalUnit() []string {
+ if m != nil {
+ return m.OrganizationalUnit
+ }
+ return nil
+}
+
+// Represents a zipped file
+type ZippedFile struct {
+ // The contents of the file in bytes
+ Contents []byte `protobuf:"bytes,1,opt,name=contents,proto3" json:"contents"`
+ // File checksum
+ Checksum string `protobuf:"bytes,2,opt,name=checksum,proto3" json:"checksum"`
+ // The directory where the file is located
+ RootDirectory string `protobuf:"bytes,3,opt,name=root_directory,json=rootDirectory,proto3" json:"root_directory"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ZippedFile) Reset() { *m = ZippedFile{} }
+func (m *ZippedFile) String() string { return proto.CompactTextString(m) }
+func (*ZippedFile) ProtoMessage() {}
+func (*ZippedFile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{8}
+}
+func (m *ZippedFile) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ZippedFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ZippedFile.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ZippedFile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ZippedFile.Merge(m, src)
+}
+func (m *ZippedFile) XXX_Size() int {
+ return m.Size()
+}
+func (m *ZippedFile) XXX_DiscardUnknown() {
+ xxx_messageInfo_ZippedFile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ZippedFile proto.InternalMessageInfo
+
+func (m *ZippedFile) GetContents() []byte {
+ if m != nil {
+ return m.Contents
+ }
+ return nil
+}
+
+func (m *ZippedFile) GetChecksum() string {
+ if m != nil {
+ return m.Checksum
+ }
+ return ""
+}
+
+func (m *ZippedFile) GetRootDirectory() string {
+ if m != nil {
+ return m.RootDirectory
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.File_Action", File_Action_name, File_Action_value)
+ proto.RegisterType((*Metadata)(nil), "f5.nginx.agent.sdk.Metadata")
+ proto.RegisterType((*DirectoryMap)(nil), "f5.nginx.agent.sdk.DirectoryMap")
+ proto.RegisterType((*File)(nil), "f5.nginx.agent.sdk.File")
+ proto.RegisterType((*Directory)(nil), "f5.nginx.agent.sdk.Directory")
+ proto.RegisterType((*SslCertificates)(nil), "f5.nginx.agent.sdk.SslCertificates")
+ proto.RegisterType((*SslCertificate)(nil), "f5.nginx.agent.sdk.SslCertificate")
+ proto.RegisterType((*CertificateDates)(nil), "f5.nginx.agent.sdk.CertificateDates")
+ proto.RegisterType((*CertificateName)(nil), "f5.nginx.agent.sdk.CertificateName")
+ proto.RegisterType((*ZippedFile)(nil), "f5.nginx.agent.sdk.ZippedFile")
+}
+
+func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
+
+var fileDescriptor_555bd8c177793206 = []byte{
+ // 1178 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcf, 0x8e, 0xdb, 0x36,
+ 0x13, 0xff, 0x64, 0xaf, 0x77, 0xad, 0xb1, 0xd7, 0x6b, 0xf0, 0x4b, 0x03, 0x61, 0xd1, 0x46, 0x86,
+ 0xfb, 0x07, 0x4e, 0xd1, 0xda, 0xed, 0xb6, 0x41, 0x5a, 0x04, 0x68, 0xb0, 0x4e, 0xd0, 0x74, 0x9b,
+ 0x66, 0x0b, 0xb0, 0xc9, 0x25, 0x17, 0x83, 0x96, 0x68, 0x2d, 0xbb, 0x32, 0x69, 0x88, 0x54, 0x50,
+ 0xe7, 0x55, 0xfa, 0x18, 0x3d, 0xf4, 0x15, 0x7a, 0xec, 0x13, 0x08, 0x45, 0x8e, 0x3a, 0xf4, 0xd8,
+ 0x4b, 0x2f, 0x05, 0x49, 0x49, 0x96, 0xb3, 0x4e, 0x52, 0xa0, 0x17, 0x71, 0xe6, 0x37, 0x33, 0x14,
+ 0xf9, 0xe3, 0x70, 0x86, 0xd0, 0x0d, 0xc4, 0x72, 0x29, 0xf8, 0x78, 0x95, 0x08, 0x25, 0x10, 0x5a,
+ 0xdc, 0x1a, 0xf3, 0x88, 0xf1, 0x9f, 0xc6, 0x24, 0xa2, 0x5c, 0x8d, 0x65, 0x78, 0x79, 0x0c, 0x91,
+ 0x88, 0x84, 0xb5, 0x1f, 0xfb, 0x91, 0x10, 0x51, 0x4c, 0x27, 0x46, 0x9b, 0xa7, 0x8b, 0x89, 0x62,
+ 0x4b, 0x2a, 0x15, 0x59, 0xae, 0xac, 0xc3, 0xf0, 0x4f, 0x07, 0xda, 0x8f, 0xa8, 0x22, 0x21, 0x51,
+ 0x04, 0x3d, 0x00, 0xb7, 0xb2, 0x7b, 0xce, 0xc0, 0x19, 0x75, 0x4e, 0x8e, 0xc7, 0x76, 0x86, 0x71,
+ 0x39, 0xc3, 0xf8, 0x71, 0xe9, 0x31, 0x3d, 0xcc, 0x33, 0x7f, 0x13, 0x80, 0x37, 0x22, 0xfa, 0x10,
+ 0xdc, 0x20, 0x66, 0x94, 0xab, 0x19, 0x0b, 0xbd, 0xc6, 0xc0, 0x19, 0xb9, 0xd6, 0xb9, 0x02, 0x71,
+ 0xdb, 0x8a, 0x67, 0x21, 0xfa, 0x18, 0x60, 0x49, 0xa5, 0x24, 0x11, 0xd5, 0xce, 0x4d, 0xe3, 0xdc,
+ 0xcb, 0x33, 0xbf, 0x86, 0x62, 0xb7, 0x90, 0xcf, 0x42, 0xf4, 0x15, 0xf4, 0x83, 0x58, 0xa4, 0xe1,
+ 0x8c, 0x04, 0x81, 0x48, 0xed, 0x1f, 0xf6, 0x4c, 0xd0, 0xb5, 0x3c, 0xf3, 0xaf, 0xd8, 0x70, 0xcf,
+ 0x20, 0xa7, 0x16, 0x38, 0x0b, 0x87, 0xdf, 0x43, 0xf7, 0x3e, 0x4b, 0x68, 0xa0, 0x44, 0xb2, 0x7e,
+ 0x44, 0x56, 0xe8, 0x2e, 0x74, 0xc2, 0x42, 0x67, 0x54, 0x7a, 0xce, 0xa0, 0x39, 0xea, 0x9c, 0xbc,
+ 0x33, 0xbe, 0xca, 0xeb, 0xb8, 0x0a, 0xc3, 0xf5, 0x88, 0xe1, 0xdf, 0x0d, 0xd8, 0xfb, 0x9a, 0xc5,
+ 0x14, 0xbd, 0x0d, 0x7b, 0x9c, 0x2c, 0xa9, 0x21, 0xce, 0x9d, 0xb6, 0xf3, 0xcc, 0x37, 0x3a, 0x36,
+ 0x5f, 0xe4, 0x43, 0x2b, 0x66, 0x9c, 0x4a, 0x43, 0x47, 0x6b, 0xea, 0xe6, 0x99, 0x6f, 0x01, 0x6c,
+ 0x07, 0x74, 0x07, 0x5a, 0x4b, 0xcd, 0xa0, 0xa1, 0xe0, 0xf5, 0xc4, 0x9b, 0x60, 0xe3, 0x8c, 0xed,
+ 0x80, 0x3e, 0x85, 0xce, 0x8a, 0x26, 0x4b, 0x26, 0x25, 0x13, 0x5c, 0x16, 0x84, 0x1c, 0xe5, 0x99,
+ 0x5f, 0x87, 0x71, 0x5d, 0xd1, 0xcb, 0x95, 0xec, 0x39, 0xf5, 0x5a, 0x03, 0x67, 0xd4, 0xb4, 0xcb,
+ 0xd5, 0x3a, 0x36, 0x5f, 0x34, 0x82, 0x76, 0x20, 0xb8, 0xa2, 0x5c, 0x49, 0x6f, 0x7f, 0xe0, 0x8c,
+ 0xba, 0xd3, 0x6e, 0x9e, 0xf9, 0x15, 0x86, 0x2b, 0x09, 0xdd, 0x86, 0x7d, 0x12, 0x28, 0x26, 0xb8,
+ 0x77, 0x30, 0x70, 0x46, 0xbd, 0x13, 0x7f, 0x17, 0x77, 0x9a, 0xa0, 0xf1, 0xa9, 0x71, 0xc3, 0x85,
+ 0xfb, 0xf0, 0x1e, 0xec, 0x5b, 0x04, 0xb9, 0xd0, 0x4a, 0xb9, 0xa4, 0xaa, 0xff, 0x3f, 0x74, 0x08,
+ 0x6e, 0xca, 0x83, 0x0b, 0xc2, 0x23, 0x1a, 0xf6, 0x1d, 0x74, 0x00, 0x4d, 0x12, 0x86, 0xfd, 0x06,
+ 0x02, 0xd8, 0x4f, 0x57, 0x21, 0x51, 0xb4, 0xdf, 0xd4, 0x72, 0x48, 0x63, 0xaa, 0x68, 0x7f, 0x6f,
+ 0xf8, 0xab, 0x03, 0x6e, 0x75, 0x30, 0x08, 0xd5, 0x8f, 0xa0, 0x20, 0xfe, 0x93, 0x92, 0xd7, 0xc6,
+ 0x9b, 0x78, 0x2d, 0xc9, 0x1c, 0x6c, 0x93, 0x69, 0x52, 0x72, 0x9b, 0x3b, 0x54, 0x70, 0xa7, 0x79,
+ 0x6e, 0x16, 0x8c, 0x8d, 0xa1, 0xb5, 0x60, 0x31, 0x95, 0x5e, 0xcb, 0xa4, 0x90, 0xf7, 0x2a, 0x1a,
+ 0xb0, 0x75, 0x1b, 0x62, 0x38, 0xfa, 0x41, 0xc6, 0xf7, 0x68, 0xa2, 0xd8, 0x82, 0x05, 0x44, 0x51,
+ 0x89, 0xee, 0x82, 0x2b, 0x65, 0x3c, 0x0b, 0x68, 0xa2, 0xca, 0x4c, 0x1c, 0xee, 0x9a, 0x66, 0x3b,
+ 0x0e, 0xb7, 0xa5, 0xd5, 0xe5, 0xf0, 0xaf, 0x03, 0xe8, 0x6d, 0x1b, 0xd1, 0x4d, 0x70, 0xf5, 0xff,
+ 0x66, 0xb5, 0xd4, 0x34, 0x27, 0xa9, 0xc1, 0x73, 0x9d, 0x9e, 0x95, 0x54, 0x65, 0x44, 0x63, 0x67,
+ 0x46, 0xfc, 0xa7, 0xfc, 0x3c, 0x87, 0xf6, 0x33, 0x12, 0xb3, 0x90, 0xa9, 0xb5, 0x21, 0xad, 0x73,
+ 0xf2, 0xde, 0xae, 0x8d, 0xd5, 0x16, 0x7e, 0x5f, 0x33, 0x62, 0x97, 0x5a, 0x46, 0xe2, 0x4a, 0x42,
+ 0x0f, 0x60, 0x9f, 0x49, 0x99, 0xd2, 0xc4, 0xa4, 0x6f, 0xe7, 0xe4, 0xdd, 0x37, 0xcc, 0xa6, 0xf7,
+ 0x37, 0x85, 0x3c, 0xf3, 0x8b, 0x30, 0x5c, 0x8c, 0xe8, 0x5b, 0x38, 0x90, 0xe9, 0xfc, 0x47, 0x1a,
+ 0x28, 0x93, 0xe6, 0xff, 0x72, 0xa6, 0x4e, 0x9e, 0xf9, 0x65, 0x1c, 0x2e, 0x05, 0xf4, 0x05, 0xf4,
+ 0xb4, 0x38, 0x23, 0xb1, 0x32, 0x74, 0x4b, 0xef, 0x60, 0xd0, 0x1c, 0xb9, 0x53, 0x94, 0x67, 0x7e,
+ 0xaf, 0x70, 0x3a, 0x8d, 0x95, 0x61, 0xbd, 0xab, 0xf5, 0x42, 0x91, 0xe8, 0x03, 0x68, 0x8b, 0x40,
+ 0xae, 0x66, 0x69, 0x12, 0x7b, 0x6d, 0x13, 0x63, 0xfe, 0xa0, 0xb1, 0x27, 0xf8, 0x3b, 0x6c, 0x85,
+ 0x24, 0x46, 0xdf, 0xc0, 0xb5, 0x55, 0x3a, 0x8f, 0x59, 0x30, 0xbb, 0xa4, 0xeb, 0x19, 0x89, 0x23,
+ 0x91, 0x30, 0x75, 0xb1, 0xf4, 0x5c, 0x73, 0xae, 0xd7, 0xf3, 0xcc, 0x47, 0xd6, 0xfe, 0x90, 0xae,
+ 0x4f, 0x4b, 0x2b, 0xde, 0x81, 0xa1, 0x07, 0xf0, 0x7f, 0xc9, 0x22, 0x4e, 0x54, 0x9a, 0xd0, 0xda,
+ 0x44, 0xb0, 0x99, 0xa8, 0x32, 0xd7, 0x26, 0xba, 0x8a, 0xa1, 0x5b, 0x70, 0x28, 0x69, 0xc2, 0x48,
+ 0x3c, 0xe3, 0xe9, 0x72, 0x4e, 0x13, 0xaf, 0x63, 0xa6, 0xe8, 0xe7, 0x99, 0xdf, 0xb5, 0x86, 0x73,
+ 0x83, 0xe3, 0x2d, 0x0d, 0x9d, 0xc3, 0xf5, 0x82, 0x11, 0xb3, 0x15, 0x16, 0x52, 0xae, 0x19, 0xa6,
+ 0x89, 0xd7, 0x35, 0xf1, 0x5e, 0x9e, 0xf9, 0xd7, 0x0a, 0x8f, 0x87, 0x74, 0x7d, 0x56, 0xd9, 0xf1,
+ 0x4e, 0x54, 0x17, 0xc0, 0x05, 0xe3, 0x11, 0x4d, 0x56, 0x09, 0xe3, 0xca, 0x3b, 0xdc, 0x14, 0xc0,
+ 0x1a, 0x8c, 0xeb, 0x0a, 0x7a, 0x04, 0x6f, 0xd5, 0xd4, 0x1a, 0x09, 0xbd, 0xcd, 0x0a, 0x6a, 0x0e,
+ 0x1b, 0x1a, 0x76, 0xa2, 0xe8, 0x7d, 0x38, 0x78, 0x46, 0x13, 0x5d, 0x1f, 0xbc, 0x23, 0x73, 0x81,
+ 0xcc, 0x11, 0x16, 0x10, 0x2e, 0x05, 0xf4, 0x18, 0x3c, 0x92, 0xaa, 0x0b, 0x1d, 0xb4, 0x7e, 0x79,
+ 0xeb, 0x7d, 0xf3, 0xe3, 0xe3, 0x3c, 0xf3, 0xaf, 0x57, 0x3e, 0xdb, 0x9b, 0x7f, 0x05, 0x3e, 0xbc,
+ 0x84, 0xfe, 0xcb, 0x77, 0x07, 0x7d, 0x04, 0xc0, 0x85, 0x9a, 0xcd, 0xe9, 0x42, 0x24, 0xf6, 0xea,
+ 0x37, 0x6d, 0x17, 0xe6, 0x42, 0x4d, 0x0d, 0x88, 0x37, 0xa2, 0xae, 0x13, 0xda, 0x9b, 0x2c, 0x14,
+ 0x4d, 0x8a, 0x0a, 0x60, 0x2e, 0x1f, 0x17, 0xea, 0x54, 0x63, 0xb8, 0x92, 0x86, 0xbf, 0x34, 0xe0,
+ 0xe8, 0xa5, 0x1b, 0x81, 0x26, 0xd0, 0xb1, 0x0f, 0x93, 0x7a, 0xa1, 0x31, 0x6d, 0xdc, 0xc2, 0x26,
+ 0xe9, 0x6b, 0xb2, 0xa6, 0xcb, 0xb4, 0xe4, 0x64, 0xed, 0x35, 0x36, 0x19, 0x5f, 0x40, 0xb8, 0x14,
+ 0x74, 0xdb, 0x94, 0x8a, 0x28, 0x5d, 0x75, 0xb4, 0x93, 0xa9, 0x2c, 0x06, 0xc0, 0x76, 0xd0, 0x8d,
+ 0x2a, 0x16, 0x01, 0x89, 0x6d, 0x65, 0x69, 0x96, 0xe5, 0xad, 0xc4, 0x70, 0x25, 0xa1, 0xcf, 0xa1,
+ 0x2b, 0x92, 0x88, 0x70, 0xf6, 0x9c, 0x98, 0x76, 0xd5, 0x32, 0xde, 0x26, 0x51, 0xeb, 0x38, 0xde,
+ 0xd2, 0xf4, 0x45, 0xa9, 0xeb, 0x24, 0x9e, 0xa5, 0x9c, 0xe9, 0x62, 0xd1, 0x2c, 0x2f, 0xca, 0xb6,
+ 0xf9, 0x09, 0x67, 0x0a, 0xef, 0xc0, 0x86, 0x3f, 0x3b, 0x00, 0x4f, 0xd9, 0x6a, 0x45, 0x43, 0xf3,
+ 0x5a, 0xa8, 0x37, 0x58, 0xe7, 0xb5, 0x0d, 0x56, 0x7b, 0x5e, 0xd0, 0xe0, 0x52, 0xa6, 0xcb, 0xe2,
+ 0x2d, 0x65, 0x3d, 0x0b, 0x0c, 0x57, 0x12, 0xfa, 0x12, 0x7a, 0x89, 0x10, 0x6a, 0x56, 0x3e, 0x4f,
+ 0xd6, 0xc5, 0x73, 0xca, 0x14, 0xa0, 0x6d, 0x0b, 0x3e, 0xd4, 0x7a, 0xd5, 0x39, 0xa7, 0xb7, 0x7f,
+ 0x7b, 0x71, 0xc3, 0xf9, 0xfd, 0xc5, 0x0d, 0xe7, 0x8f, 0x17, 0x37, 0x9c, 0xa7, 0x37, 0x23, 0xa6,
+ 0x2e, 0xd2, 0xf9, 0x38, 0x10, 0xcb, 0x89, 0xa9, 0x85, 0x13, 0x53, 0x0b, 0x27, 0x32, 0xbc, 0x9c,
+ 0x3c, 0x3b, 0xb1, 0xaf, 0xc9, 0x3b, 0xb6, 0xf0, 0xef, 0x9b, 0xe1, 0xb3, 0x7f, 0x02, 0x00, 0x00,
+ 0xff, 0xff, 0x95, 0x41, 0xf8, 0x87, 0x98, 0x0a, 0x00, 0x00,
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.CloudAccountId) > 0 {
+ i -= len(m.CloudAccountId)
+ copy(dAtA[i:], m.CloudAccountId)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.CloudAccountId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.MessageId) > 0 {
+ i -= len(m.MessageId)
+ copy(dAtA[i:], m.MessageId)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.MessageId)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Timestamp != nil {
+ {
+ size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DirectoryMap) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DirectoryMap) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DirectoryMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Directories) > 0 {
+ for iNdEx := len(m.Directories) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Directories[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *File) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *File) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *File) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Action != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.Contents) > 0 {
+ i -= len(m.Contents)
+ copy(dAtA[i:], m.Contents)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Contents)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Size_ != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x28
+ }
+ if len(m.Permissions) > 0 {
+ i -= len(m.Permissions)
+ copy(dAtA[i:], m.Permissions)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Permissions)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Mtime != nil {
+ {
+ size, err := m.Mtime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Lines != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Lines))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Directory) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Directory) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Directory) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Files) > 0 {
+ for iNdEx := len(m.Files) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Files[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Size_ != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Permissions) > 0 {
+ i -= len(m.Permissions)
+ copy(dAtA[i:], m.Permissions)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Permissions)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Mtime != nil {
+ {
+ size, err := m.Mtime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SslCertificates) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SslCertificates) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SslCertificates) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.SslCerts) > 0 {
+ for iNdEx := len(m.SslCerts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SslCerts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SslCertificate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SslCertificate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SslCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.AuthorityKeyIdentifier) > 0 {
+ i -= len(m.AuthorityKeyIdentifier)
+ copy(dAtA[i:], m.AuthorityKeyIdentifier)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.AuthorityKeyIdentifier)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.Version != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Version))
+ i--
+ dAtA[i] = 0x78
+ }
+ if len(m.FingerprintAlgorithm) > 0 {
+ i -= len(m.FingerprintAlgorithm)
+ copy(dAtA[i:], m.FingerprintAlgorithm)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.FingerprintAlgorithm)))
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.Fingerprint) > 0 {
+ i -= len(m.Fingerprint)
+ copy(dAtA[i:], m.Fingerprint)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Fingerprint)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.SubjectKeyIdentifier) > 0 {
+ i -= len(m.SubjectKeyIdentifier)
+ copy(dAtA[i:], m.SubjectKeyIdentifier)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.SubjectKeyIdentifier)))
+ i--
+ dAtA[i] = 0x62
+ }
+ if len(m.SerialNumber) > 0 {
+ i -= len(m.SerialNumber)
+ copy(dAtA[i:], m.SerialNumber)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.SerialNumber)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.SignatureAlgorithm) > 0 {
+ i -= len(m.SignatureAlgorithm)
+ copy(dAtA[i:], m.SignatureAlgorithm)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.SignatureAlgorithm)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.PublicKeyAlgorithm) > 0 {
+ i -= len(m.PublicKeyAlgorithm)
+ copy(dAtA[i:], m.PublicKeyAlgorithm)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.PublicKeyAlgorithm)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.OcspUrl) > 0 {
+ for iNdEx := len(m.OcspUrl) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.OcspUrl[iNdEx])
+ copy(dAtA[i:], m.OcspUrl[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.OcspUrl[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.SubjAltNames) > 0 {
+ for iNdEx := len(m.SubjAltNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SubjAltNames[iNdEx])
+ copy(dAtA[i:], m.SubjAltNames[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.SubjAltNames[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.Subject != nil {
+ {
+ size, err := m.Subject.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Issuer != nil {
+ {
+ size, err := m.Issuer.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Validity != nil {
+ {
+ size, err := m.Validity.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Mtime != nil {
+ {
+ size, err := m.Mtime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommon(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Size_ != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.FileName) > 0 {
+ i -= len(m.FileName)
+ copy(dAtA[i:], m.FileName)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.FileName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateDates) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateDates) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CertificateDates) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.NotAfter != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.NotAfter))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NotBefore != 0 {
+ i = encodeVarintCommon(dAtA, i, uint64(m.NotBefore))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateName) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateName) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CertificateName) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.OrganizationalUnit) > 0 {
+ for iNdEx := len(m.OrganizationalUnit) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.OrganizationalUnit[iNdEx])
+ copy(dAtA[i:], m.OrganizationalUnit[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.OrganizationalUnit[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.Organization) > 0 {
+ for iNdEx := len(m.Organization) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Organization[iNdEx])
+ copy(dAtA[i:], m.Organization[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Organization[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Locality) > 0 {
+ for iNdEx := len(m.Locality) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Locality[iNdEx])
+ copy(dAtA[i:], m.Locality[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Locality[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.State) > 0 {
+ for iNdEx := len(m.State) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.State[iNdEx])
+ copy(dAtA[i:], m.State[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.State[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Country) > 0 {
+ for iNdEx := len(m.Country) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Country[iNdEx])
+ copy(dAtA[i:], m.Country[iNdEx])
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Country[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.CommonName) > 0 {
+ i -= len(m.CommonName)
+ copy(dAtA[i:], m.CommonName)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.CommonName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ZippedFile) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ZippedFile) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ZippedFile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.RootDirectory) > 0 {
+ i -= len(m.RootDirectory)
+ copy(dAtA[i:], m.RootDirectory)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.RootDirectory)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Checksum) > 0 {
+ i -= len(m.Checksum)
+ copy(dAtA[i:], m.Checksum)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Checksum)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Contents) > 0 {
+ i -= len(m.Contents)
+ copy(dAtA[i:], m.Contents)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Contents)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintCommon(dAtA []byte, offset int, v uint64) int {
+ offset -= sovCommon(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Metadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Timestamp != nil {
+ l = m.Timestamp.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.MessageId)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.CloudAccountId)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DirectoryMap) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Directories) > 0 {
+ for _, e := range m.Directories {
+ l = e.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *File) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Lines != 0 {
+ n += 1 + sovCommon(uint64(m.Lines))
+ }
+ if m.Mtime != nil {
+ l = m.Mtime.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.Permissions)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Size_ != 0 {
+ n += 1 + sovCommon(uint64(m.Size_))
+ }
+ l = len(m.Contents)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Action != 0 {
+ n += 1 + sovCommon(uint64(m.Action))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Directory) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Mtime != nil {
+ l = m.Mtime.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.Permissions)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Size_ != 0 {
+ n += 1 + sovCommon(uint64(m.Size_))
+ }
+ if len(m.Files) > 0 {
+ for _, e := range m.Files {
+ l = e.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SslCertificates) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.SslCerts) > 0 {
+ for _, e := range m.SslCerts {
+ l = e.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SslCertificate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.FileName)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Size_ != 0 {
+ n += 1 + sovCommon(uint64(m.Size_))
+ }
+ if m.Mtime != nil {
+ l = m.Mtime.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Validity != nil {
+ l = m.Validity.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Issuer != nil {
+ l = m.Issuer.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Subject != nil {
+ l = m.Subject.Size()
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if len(m.SubjAltNames) > 0 {
+ for _, s := range m.SubjAltNames {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if len(m.OcspUrl) > 0 {
+ for _, s := range m.OcspUrl {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ l = len(m.PublicKeyAlgorithm)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.SignatureAlgorithm)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.SerialNumber)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.SubjectKeyIdentifier)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.Fingerprint)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.FingerprintAlgorithm)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.Version != 0 {
+ n += 1 + sovCommon(uint64(m.Version))
+ }
+ l = len(m.AuthorityKeyIdentifier)
+ if l > 0 {
+ n += 2 + l + sovCommon(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CertificateDates) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NotBefore != 0 {
+ n += 1 + sovCommon(uint64(m.NotBefore))
+ }
+ if m.NotAfter != 0 {
+ n += 1 + sovCommon(uint64(m.NotAfter))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CertificateName) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CommonName)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if len(m.Country) > 0 {
+ for _, s := range m.Country {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if len(m.State) > 0 {
+ for _, s := range m.State {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if len(m.Locality) > 0 {
+ for _, s := range m.Locality {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if len(m.Organization) > 0 {
+ for _, s := range m.Organization {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if len(m.OrganizationalUnit) > 0 {
+ for _, s := range m.OrganizationalUnit {
+ l = len(s)
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ZippedFile) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Contents)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.Checksum)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.RootDirectory)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovCommon(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozCommon(x uint64) (n int) {
+ return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Timestamp == nil {
+ m.Timestamp = &types.Timestamp{}
+ }
+ if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MessageId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MessageId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CloudAccountId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CloudAccountId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommon(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DirectoryMap) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DirectoryMap: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DirectoryMap: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Directories", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Directories = append(m.Directories, &Directory{})
+ if err := m.Directories[len(m.Directories)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommon(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *File) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: File: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: File: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lines", wireType)
+ }
+ m.Lines = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lines |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mtime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Mtime == nil {
+ m.Mtime = &types.Timestamp{}
+ }
+ if err := m.Mtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Permissions = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+ }
+ m.Size_ = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Size_ |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Contents", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Contents = append(m.Contents[:0], dAtA[iNdEx:postIndex]...)
+ if m.Contents == nil {
+ m.Contents = []byte{}
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Action |= File_Action(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommon(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Code generated by protoc-gen-gogo from common.proto; the decoding logic
+// must stay in sync with the generator output — do not edit by hand.
+func (m *Directory) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Directory: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Directory: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: name (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: mtime (google.protobuf.Timestamp submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mtime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Mtime == nil {
+				m.Mtime = &types.Timestamp{}
+			}
+			if err := m.Mtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			// Field 3: permissions (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Permissions = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: size (int64 varint).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			// Field 5: files (repeated File submessage) — appended per occurrence.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Files", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Files = append(m.Files, &File{})
+			if err := m.Files[len(m.Files)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, retaining the raw bytes so the message
+			// round-trips without data loss.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Code generated by protoc-gen-gogo from common.proto; do not edit by hand.
+func (m *SslCertificates) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SslCertificates: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SslCertificates: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: ssl_certs (repeated SslCertificate submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SslCerts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SslCerts = append(m.SslCerts, &SslCertificate{})
+			if err := m.SslCerts[len(m.SslCerts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip and preserve the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Handles fields 1-16 of SslCertificate (strings, varints, repeated strings
+// and nested CertificateDates/CertificateName messages).
+// Code generated by protoc-gen-gogo from common.proto; do not edit by hand.
+func (m *SslCertificate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SslCertificate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SslCertificate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: file_name (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FileName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: size (int64 varint).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: mtime (google.protobuf.Timestamp submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mtime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Mtime == nil {
+				m.Mtime = &types.Timestamp{}
+			}
+			if err := m.Mtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			// Field 4: validity (CertificateDates submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Validity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Validity == nil {
+				m.Validity = &CertificateDates{}
+			}
+			if err := m.Validity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			// Field 5: issuer (CertificateName submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Issuer == nil {
+				m.Issuer = &CertificateName{}
+			}
+			if err := m.Issuer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			// Field 6: subject (CertificateName submessage).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Subject == nil {
+				m.Subject = &CertificateName{}
+			}
+			if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			// Field 7: subj_alt_names (repeated string) — appended per occurrence.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubjAltNames", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubjAltNames = append(m.SubjAltNames, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			// Field 8: ocsp_url (repeated string) — appended per occurrence.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OcspUrl", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OcspUrl = append(m.OcspUrl, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			// Field 9: public_key_algorithm (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyAlgorithm", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PublicKeyAlgorithm = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			// Field 10: signature_algorithm (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SignatureAlgorithm", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SignatureAlgorithm = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			// Field 11: serial_number (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SerialNumber", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SerialNumber = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			// Field 12: subject_key_identifier (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubjectKeyIdentifier", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubjectKeyIdentifier = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 13:
+			// Field 13: fingerprint (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Fingerprint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 14:
+			// Field 14: fingerprint_algorithm (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FingerprintAlgorithm", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FingerprintAlgorithm = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			// Field 15: version (int64 varint).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			m.Version = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Version |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			// Field 16: authority_key_identifier (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthorityKeyIdentifier", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AuthorityKeyIdentifier = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip and preserve the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Code generated by protoc-gen-gogo from common.proto; do not edit by hand.
+func (m *CertificateDates) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CertificateDates: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CertificateDates: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: not_before (int64 varint).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
+			}
+			m.NotBefore = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NotBefore |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			// Field 2: not_after (int64 varint).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
+			}
+			m.NotAfter = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NotAfter |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			// Unknown field: skip and preserve the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// All fields except common_name are repeated strings, appended per occurrence.
+// Code generated by protoc-gen-gogo from common.proto; do not edit by hand.
+func (m *CertificateName) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CertificateName: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CertificateName: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: common_name (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CommonName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: country (repeated string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Country", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Country = append(m.Country, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			// Field 3: state (repeated string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.State = append(m.State, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			// Field 4: locality (repeated string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Locality", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Locality = append(m.Locality, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			// Field 5: organization (repeated string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Organization = append(m.Organization, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 6:
+			// Field 6: organizational_unit (repeated string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OrganizationalUnit", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OrganizationalUnit = append(m.OrganizationalUnit, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip and preserve the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Code generated by protoc-gen-gogo from common.proto; do not edit by hand.
+func (m *ZippedFile) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ZippedFile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ZippedFile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: contents (bytes) — copied into m.Contents, reusing its
+			// backing array; a present-but-empty field yields a non-nil empty slice.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Contents", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Contents = append(m.Contents[:0], dAtA[iNdEx:postIndex]...)
+			if m.Contents == nil {
+				m.Contents = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			// Field 2: checksum (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checksum = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			// Field 3: root_directory (string).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RootDirectory", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommon
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RootDirectory = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip and preserve the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipCommon(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCommon
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipCommon returns the number of bytes occupied by the wire-format field
+// that begins at the start of dAtA, tracking group nesting depth so that
+// (deprecated) start-group/end-group pairs are skipped as a unit. It is used
+// by the generated Unmarshal methods to preserve unknown fields.
+// Code generated by protoc-gen-gogo; do not edit by hand.
+func skipCommon(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		// Read the field key: a varint encoding (fieldNum<<3 | wireType).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCommon
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint payload: advance past continuation bytes.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			// Fixed 64-bit payload.
+			iNdEx += 8
+		case 2:
+			// Length-delimited payload: read the length varint, then skip it.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommon
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthCommon
+			}
+			iNdEx += length
+		case 3:
+			// Start of a group: nest one level deeper.
+			depth++
+		case 4:
+			// End of a group: must match a prior start-group.
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupCommon
+			}
+			depth--
+		case 5:
+			// Fixed 32-bit payload.
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthCommon
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.proto
new file mode 100644
index 000000000..092d4dad1
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common.proto
@@ -0,0 +1,146 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents the metadata for a message
+message Metadata {
+ // timestamp defines the time of message creation
+ google.protobuf.Timestamp timestamp = 1 [(gogoproto.jsontag) = "timestamp"];
+ // Client ID
+ string client_id = 2 [(gogoproto.jsontag) = "client_id"];
+ // Message ID
+ string message_id = 3 [(gogoproto.jsontag) = "message_id"];
+ // Cloud Account ID (e.g. AWS/Azure/GCP account ID)
+ string cloud_account_id = 4 [(gogoproto.jsontag) = "cloud_account_id"];
+}
+
+// Represents a map of directories & files on the system
+message DirectoryMap {
+ // List of directories
+ repeated Directory directories = 1;
+}
+
+// Represents a file
+message File {
+ // Name of the file
+ string name = 1 [(gogoproto.jsontag) = "name"];
+ // Number of lines in the file
+ int32 lines = 2 [(gogoproto.jsontag) = "lines"];
+ // When the file was last modified
+ google.protobuf.Timestamp mtime = 3 [(gogoproto.jsontag) = "mtime"];
+ // File permissions (e.g. 0644)
+ string permissions = 4 [(gogoproto.jsontag) = "permissions"];
+ // Size of the file in bytes
+ int64 size = 5 [(gogoproto.jsontag) = "size"];
+ // The contents of the file in bytes
+ bytes contents = 6 [(gogoproto.jsontag) = "contents"];
+ // Action enum
+ enum Action {
+ // Default value
+ unset = 0;
+ // No changes to the file
+ unchanged = 1;
+ // New file
+ add = 2;
+ // Updated file
+ update = 3;
+ // File deleted
+ delete = 4;
+ }
+ // Action to take on the file (e.g. update, delete, etc)
+ Action action = 7;
+}
+
+// Represents a directory
+message Directory {
+ // Name of the directory
+ string name = 1;
+ // When the directory was last modified
+ google.protobuf.Timestamp mtime = 2;
+ // Directory permissions (e.g. 0644)
+ string permissions = 3;
+ // Size of the directory in bytes
+ int64 size = 4;
+ // List of files in the directory
+ repeated File files = 5;
+}
+
+// Represents a list of SSL certificates files
+message SslCertificates {
+ // List of SSL certificates
+ repeated SslCertificate ssl_certs = 1;
+}
+
+// Represents a SSL certificate file
+message SslCertificate {
+ // Name of the file
+ string file_name = 1 [(gogoproto.jsontag) = "fileName"];
+ // Size of the file in bytes
+ int64 size = 2 [(gogoproto.jsontag) = "size"];
+ // When the file was last modified
+ google.protobuf.Timestamp mtime = 3 [(gogoproto.jsontag) = "mtime"];
+ // A time when the certificate is valid
+ CertificateDates validity = 4 [(gogoproto.jsontag) = "validity"];
+ // This field contains the distinguished name (DN) of the certificate issuer
+ CertificateName issuer = 5 [(gogoproto.jsontag) = "issuer"];
+ // The dedicated object name associated with the public key for which the certificate is issued
+ CertificateName subject = 6 [(gogoproto.jsontag) = "subject"];
+ // Subject alternative names that allow users to specify additional host names for the SSL certificate
+ repeated string subj_alt_names = 7 [(gogoproto.jsontag) = "subjectAltName"];
+ // Online Certificate Status Protocol URL
+ repeated string ocsp_url = 8 [(gogoproto.jsontag) = "ocspURL"];
+ // Public key encryption algorithm (e.g. RSA)
+ string public_key_algorithm = 9 [(gogoproto.jsontag) = "publicKeyAlgorithm"];
+ // The signature algorithm contain a hashing algorithm and an encryption algorithm (e.g. sha256RSA where sha256 is the hashing algorithm and RSA is the encryption algorithm)
+ string signature_algorithm = 10 [(gogoproto.jsontag) = "signatureAlgorithm"];
+ // Used to uniquely identify the certificate within a CA's systems
+ string serial_number = 11 [(gogoproto.jsontag) = "serialNumber"];
+ // The subject key identifier extension provides a means of identifying certificates that contain a particular public key
+ string subject_key_identifier = 12 [(gogoproto.jsontag) = "subjectKeyIdentifier"];
+ // SSL certificate fingerprint
+ string fingerprint = 13 [(gogoproto.jsontag) = "fingerprint"];
+ // SSL certificate fingerprint algorithm
+ string fingerprint_algorithm = 14 [(gogoproto.jsontag) = "fingerprintAlgorithm"];
+ // There are three versions of certificates: 1, 2 and 3, numbered as 0, 1 and 2. Version 1 supports only the basic fields; Version 2 adds unique identifiers, which represent two additional fields; Version 3 adds extensions.
+ int64 version = 15 [(gogoproto.jsontag) = "version"];
+ // The authority key identifier extension provides a means of identifying the Public Key corresponding to the Private Key used to sign a certificate
+ string authority_key_identifier = 16 [(gogoproto.jsontag) = "authorityKeyIdentifier"];
+}
+
+// Represents the dates for which a certificate is valid
+message CertificateDates {
+ // The start date for when the certificate is valid
+ int64 not_before = 1 [(gogoproto.jsontag) = "notBefore"];
+ // The end date for when the certificate is valid
+ int64 not_after = 2 [(gogoproto.jsontag) = "notAfter"];
+}
+
+// Represents a Distinguished Name (DN)
+message CertificateName {
+ // The fully qualified domain name (e.g. www.example.com)
+ string common_name = 1 [(gogoproto.jsontag) = "commonName"];
+ // Country
+ repeated string country = 2 [(gogoproto.jsontag) = "country"];
+ // State
+ repeated string state = 3 [(gogoproto.jsontag) = "state"];
+ // Locality
+ repeated string locality = 4 [(gogoproto.jsontag) = "locality"];
+ // Organization
+ repeated string organization = 5 [(gogoproto.jsontag) = "organization"];
+ // Organizational Unit
+ repeated string organizational_unit = 6 [(gogoproto.jsontag) = "organizationalUnit"];
+}
+
+// Represents a zipped file
+message ZippedFile {
+ // The contents of the file in bytes
+ bytes contents = 1 [(gogoproto.jsontag) = "contents"];
+ // File checksum
+ string checksum = 2 [(gogoproto.jsontag) = "checksum"];
+ // The directory where the file is located
+ string root_directory = 3 [(gogoproto.jsontag) = "root_directory"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.pb.go
new file mode 100644
index 000000000..74c563d70
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.pb.go
@@ -0,0 +1,382 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: common.proto
+
+package f5_nginx_agent_sdk_common
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Represents a dimension used in events
+type Dimension struct {
+ // Dimension name
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Dimension value
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Dimension) Reset() { *m = Dimension{} }
+func (m *Dimension) String() string { return proto.CompactTextString(m) }
+func (*Dimension) ProtoMessage() {}
+func (*Dimension) Descriptor() ([]byte, []int) {
+ return fileDescriptor_555bd8c177793206, []int{0}
+}
+func (m *Dimension) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Dimension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Dimension.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Dimension) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Dimension.Merge(m, src)
+}
+func (m *Dimension) XXX_Size() int {
+ return m.Size()
+}
+func (m *Dimension) XXX_DiscardUnknown() {
+ xxx_messageInfo_Dimension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Dimension proto.InternalMessageInfo
+
+func (m *Dimension) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Dimension) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Dimension)(nil), "f5.nginx.agent.sdk.common.Dimension")
+}
+
+func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
+
+var fileDescriptor_555bd8c177793206 = []byte{
+ // 181 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xce, 0xcf, 0xcd,
+ 0xcd, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4c, 0x33, 0xd5, 0xcb, 0x4b, 0xcf,
+ 0xcc, 0xab, 0xd0, 0x4b, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x2b, 0x4e, 0xc9, 0xd6, 0x83, 0x28, 0x90,
+ 0xe2, 0x4a, 0xcf, 0x4f, 0xcf, 0x87, 0x28, 0x53, 0xf2, 0xe2, 0xe2, 0x74, 0xc9, 0xcc, 0x4d, 0xcd,
+ 0x2b, 0xce, 0xcc, 0xcf, 0x13, 0x92, 0xe1, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60,
+ 0xd4, 0xe0, 0x74, 0xe2, 0x78, 0x75, 0x4f, 0x1e, 0xcc, 0x0f, 0x02, 0x93, 0x42, 0xf2, 0x5c, 0xac,
+ 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x69, 0xce, 0x57, 0xf7, 0xe4, 0x21, 0x02, 0x41,
+ 0x10, 0xca, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63,
+ 0x8c, 0x72, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0x02, 0x59, 0xaa, 0x0f, 0x76, 0x88, 0x3e, 0xd8,
+ 0x21, 0xfa, 0xc5, 0x29, 0xd9, 0xfa, 0x65, 0x46, 0xfa, 0x60, 0xeb, 0xf5, 0x21, 0x2e, 0xb2, 0xc6,
+ 0xe9, 0xd6, 0x24, 0x36, 0xb0, 0x3a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x8b, 0xaf,
+ 0x09, 0xdd, 0x00, 0x00, 0x00,
+}
+
+func (m *Dimension) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Dimension) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Dimension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintCommon(dAtA []byte, offset int, v uint64) int {
+ offset -= sovCommon(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Dimension) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovCommon(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovCommon(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozCommon(x uint64) (n int) {
+ return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Dimension) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Dimension: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Dimension: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommon
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommon(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommon
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipCommon(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommon
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthCommon
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupCommon
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCommon
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.proto
new file mode 100644
index 000000000..a85e2bcff
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/common/common.proto
@@ -0,0 +1,15 @@
+// Common messages
+syntax = "proto3";
+package f5.nginx.agent.sdk.common;
+
+import "gogo.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto/common;f5.nginx.agent.sdk.common";
+
+// Represents a dimension used in events
+message Dimension {
+ // Dimension name
+ string name = 1 [(gogoproto.jsontag) = "name"];
+ // Dimension value
+ string value = 2 [(gogoproto.jsontag) = "value"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.pb.go
new file mode 100644
index 000000000..e7bdcb53c
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.pb.go
@@ -0,0 +1,696 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: config.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Represents a config report
+type ConfigReport struct {
+ // Provides metadata information associated with the message
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // List of NGINX config descriptors
+ Configs []*ConfigDescriptor `protobuf:"bytes,2,rep,name=configs,proto3" json:"configs"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConfigReport) Reset() { *m = ConfigReport{} }
+func (m *ConfigReport) String() string { return proto.CompactTextString(m) }
+func (*ConfigReport) ProtoMessage() {}
+func (*ConfigReport) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3eaf2c85e69e9ea4, []int{0}
+}
+func (m *ConfigReport) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfigReport.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfigReport) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigReport.Merge(m, src)
+}
+func (m *ConfigReport) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigReport) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigReport.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigReport proto.InternalMessageInfo
+
+func (m *ConfigReport) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+func (m *ConfigReport) GetConfigs() []*ConfigDescriptor {
+ if m != nil {
+ return m.Configs
+ }
+ return nil
+}
+
+// Represents a config descriptor
+type ConfigDescriptor struct {
+ // System ID
+ SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id"`
+ // NGINX ID
+ NginxId string `protobuf:"bytes,2,opt,name=nginx_id,json=nginxId,proto3" json:"nginx_id"`
+ // Config file checksum
+ Checksum string `protobuf:"bytes,3,opt,name=checksum,proto3" json:"checksum"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConfigDescriptor) Reset() { *m = ConfigDescriptor{} }
+func (m *ConfigDescriptor) String() string { return proto.CompactTextString(m) }
+func (*ConfigDescriptor) ProtoMessage() {}
+func (*ConfigDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3eaf2c85e69e9ea4, []int{1}
+}
+func (m *ConfigDescriptor) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfigDescriptor.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfigDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigDescriptor.Merge(m, src)
+}
+func (m *ConfigDescriptor) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigDescriptor proto.InternalMessageInfo
+
+func (m *ConfigDescriptor) GetSystemId() string {
+ if m != nil {
+ return m.SystemId
+ }
+ return ""
+}
+
+func (m *ConfigDescriptor) GetNginxId() string {
+ if m != nil {
+ return m.NginxId
+ }
+ return ""
+}
+
+func (m *ConfigDescriptor) GetChecksum() string {
+ if m != nil {
+ return m.Checksum
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*ConfigReport)(nil), "f5.nginx.agent.sdk.ConfigReport")
+ proto.RegisterType((*ConfigDescriptor)(nil), "f5.nginx.agent.sdk.ConfigDescriptor")
+}
+
+func init() { proto.RegisterFile("config.proto", fileDescriptor_3eaf2c85e69e9ea4) }
+
+var fileDescriptor_3eaf2c85e69e9ea4 = []byte{
+ // 298 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xce, 0xcf, 0x4b,
+ 0xcb, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0x33, 0xd5, 0xcb, 0x4b, 0xcf,
+ 0xcc, 0xab, 0xd0, 0x4b, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x2b, 0x4e, 0xc9, 0x96, 0xe2, 0x49, 0xce,
+ 0xcf, 0xcd, 0xcd, 0xcf, 0x83, 0xa8, 0x90, 0xe2, 0x4a, 0xcf, 0x4f, 0xcf, 0x87, 0xb0, 0x95, 0xa6,
+ 0x33, 0x72, 0xf1, 0x38, 0x83, 0xb5, 0x07, 0xa5, 0x16, 0xe4, 0x17, 0x95, 0x08, 0x59, 0x71, 0xb1,
+ 0xe4, 0xa6, 0x96, 0x24, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xc9, 0xe8, 0x61, 0x9a, 0xa6,
+ 0xe7, 0x9b, 0x5a, 0x92, 0x98, 0x92, 0x58, 0x92, 0xe8, 0xc4, 0xf1, 0xea, 0x9e, 0x3c, 0x58, 0x75,
+ 0x10, 0x98, 0x14, 0xf2, 0xe6, 0x62, 0x87, 0x38, 0xa5, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb,
+ 0x48, 0x05, 0x9b, 0x76, 0x88, 0x75, 0x2e, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45,
+ 0x4e, 0xdc, 0xaf, 0xee, 0xc9, 0xc3, 0x34, 0x06, 0xc1, 0x18, 0x4a, 0x13, 0x19, 0xb9, 0x04, 0xd0,
+ 0x95, 0x0a, 0x69, 0x71, 0x71, 0x16, 0x57, 0x16, 0x97, 0xa4, 0xe6, 0xc6, 0x67, 0xa6, 0x80, 0x9d,
+ 0xc8, 0xe9, 0xc4, 0xfb, 0xea, 0x9e, 0x3c, 0x42, 0x30, 0x88, 0x03, 0xc2, 0xf4, 0x4c, 0x11, 0x52,
+ 0xe7, 0xe2, 0x00, 0x5b, 0x0d, 0x52, 0xca, 0x04, 0x56, 0xca, 0xf3, 0xea, 0x9e, 0x3c, 0x5c, 0x2c,
+ 0x88, 0x1d, 0xcc, 0xf2, 0x4c, 0x11, 0xd2, 0xe0, 0xe2, 0x48, 0xce, 0x48, 0x4d, 0xce, 0x2e, 0x2e,
+ 0xcd, 0x95, 0x60, 0x46, 0x28, 0x84, 0x89, 0x05, 0xc1, 0x59, 0x4e, 0xe6, 0x27, 0x1e, 0xc9, 0x31,
+ 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x66, 0x7a, 0x66, 0x49, 0x46, 0x69,
+ 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0xd8, 0x2c, 0x7d, 0xb0, 0x27, 0xf5, 0x8b, 0x53, 0xb2, 0xf5,
+ 0xcb, 0x8c, 0xf4, 0xc1, 0x01, 0x6c, 0x0d, 0x26, 0x93, 0xd8, 0xc0, 0x94, 0x31, 0x20, 0x00, 0x00,
+ 0xff, 0xff, 0x53, 0x4a, 0x06, 0x93, 0xab, 0x01, 0x00, 0x00,
+}
+
+func (m *ConfigReport) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigReport) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigReport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Configs) > 0 {
+ for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Configs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConfig(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConfig(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigDescriptor) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigDescriptor) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigDescriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Checksum) > 0 {
+ i -= len(m.Checksum)
+ copy(dAtA[i:], m.Checksum)
+ i = encodeVarintConfig(dAtA, i, uint64(len(m.Checksum)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.NginxId) > 0 {
+ i -= len(m.NginxId)
+ copy(dAtA[i:], m.NginxId)
+ i = encodeVarintConfig(dAtA, i, uint64(len(m.NginxId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.SystemId) > 0 {
+ i -= len(m.SystemId)
+ copy(dAtA[i:], m.SystemId)
+ i = encodeVarintConfig(dAtA, i, uint64(len(m.SystemId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintConfig(dAtA []byte, offset int, v uint64) int {
+ offset -= sovConfig(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ConfigReport) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovConfig(uint64(l))
+ }
+ if len(m.Configs) > 0 {
+ for _, e := range m.Configs {
+ l = e.Size()
+ n += 1 + l + sovConfig(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ConfigDescriptor) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SystemId)
+ if l > 0 {
+ n += 1 + l + sovConfig(uint64(l))
+ }
+ l = len(m.NginxId)
+ if l > 0 {
+ n += 1 + l + sovConfig(uint64(l))
+ }
+ l = len(m.Checksum)
+ if l > 0 {
+ n += 1 + l + sovConfig(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovConfig(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozConfig(x uint64) (n int) {
+ return sovConfig(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ConfigReport) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigReport: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigReport: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConfig
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Meta == nil {
+ m.Meta = &Metadata{}
+ }
+ if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConfig
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Configs = append(m.Configs, &ConfigDescriptor{})
+ if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConfig(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigDescriptor) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigDescriptor: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigDescriptor: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConfig
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SystemId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConfig
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NginxId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConfig
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Checksum = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConfig(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConfig
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipConfig(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConfig
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthConfig
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupConfig
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthConfig
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthConfig = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowConfig = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupConfig = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.proto
new file mode 100644
index 000000000..d44386e6d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/config.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "common.proto";
+import "gogo.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents a config report
+message ConfigReport {
+ // Provides metadata information associated with the message
+ Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+ // List of NGINX config descriptors
+ repeated ConfigDescriptor configs = 2 [(gogoproto.jsontag) = "configs"];
+}
+
+// Represents a config descriptor
+message ConfigDescriptor {
+ // System ID
+ string system_id = 1 [(gogoproto.jsontag) = "system_id"];
+ // NGINX ID
+ string nginx_id = 2 [(gogoproto.jsontag) = "nginx_id"];
+ // Config file checksum
+ string checksum = 3 [(gogoproto.jsontag) = "checksum"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.pb.go
new file mode 100644
index 000000000..c3628b516
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.pb.go
@@ -0,0 +1,481 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: dp_software_details.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Represents dataplane software details which contains details for additional software running on the dataplane that pertains to NGINX Agent
+type DataplaneSoftwareDetails struct {
+ // Types that are valid to be assigned to Data:
+ // *DataplaneSoftwareDetails_AppProtectWafDetails
+ // *DataplaneSoftwareDetails_NginxDetails
+ Data isDataplaneSoftwareDetails_Data `protobuf_oneof:"data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DataplaneSoftwareDetails) Reset() { *m = DataplaneSoftwareDetails{} }
+func (m *DataplaneSoftwareDetails) String() string { return proto.CompactTextString(m) }
+func (*DataplaneSoftwareDetails) ProtoMessage() {}
+func (*DataplaneSoftwareDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c38a59b96dc90da7, []int{0}
+}
+func (m *DataplaneSoftwareDetails) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DataplaneSoftwareDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DataplaneSoftwareDetails.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DataplaneSoftwareDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataplaneSoftwareDetails.Merge(m, src)
+}
+func (m *DataplaneSoftwareDetails) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataplaneSoftwareDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataplaneSoftwareDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataplaneSoftwareDetails proto.InternalMessageInfo
+
+type isDataplaneSoftwareDetails_Data interface {
+ isDataplaneSoftwareDetails_Data()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type DataplaneSoftwareDetails_AppProtectWafDetails struct {
+ AppProtectWafDetails *AppProtectWAFDetails `protobuf:"bytes,1,opt,name=app_protect_waf_details,json=appProtectWafDetails,proto3,oneof" json:"app_protect_waf_details"`
+}
+type DataplaneSoftwareDetails_NginxDetails struct {
+ NginxDetails *NginxDetails `protobuf:"bytes,2,opt,name=nginx_details,json=nginxDetails,proto3,oneof" json:"nginx_details"`
+}
+
+func (*DataplaneSoftwareDetails_AppProtectWafDetails) isDataplaneSoftwareDetails_Data() {}
+func (*DataplaneSoftwareDetails_NginxDetails) isDataplaneSoftwareDetails_Data() {}
+
+func (m *DataplaneSoftwareDetails) GetData() isDataplaneSoftwareDetails_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *DataplaneSoftwareDetails) GetAppProtectWafDetails() *AppProtectWAFDetails {
+ if x, ok := m.GetData().(*DataplaneSoftwareDetails_AppProtectWafDetails); ok {
+ return x.AppProtectWafDetails
+ }
+ return nil
+}
+
+func (m *DataplaneSoftwareDetails) GetNginxDetails() *NginxDetails {
+ if x, ok := m.GetData().(*DataplaneSoftwareDetails_NginxDetails); ok {
+ return x.NginxDetails
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DataplaneSoftwareDetails) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*DataplaneSoftwareDetails_AppProtectWafDetails)(nil),
+ (*DataplaneSoftwareDetails_NginxDetails)(nil),
+ }
+}
+
+func init() {
+ proto.RegisterType((*DataplaneSoftwareDetails)(nil), "f5.nginx.agent.sdk.DataplaneSoftwareDetails")
+}
+
+func init() { proto.RegisterFile("dp_software_details.proto", fileDescriptor_c38a59b96dc90da7) }
+
+var fileDescriptor_c38a59b96dc90da7 = []byte{
+ // 262 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0x29, 0x88, 0x2f,
+ 0xce, 0x4f, 0x2b, 0x29, 0x4f, 0x2c, 0x4a, 0x8d, 0x4f, 0x49, 0x2d, 0x49, 0xcc, 0xcc, 0x29, 0xd6,
+ 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0x33, 0xd5, 0xcb, 0x4b, 0xcf, 0xcc, 0xab, 0xd0,
+ 0x4b, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x2b, 0x4e, 0xc9, 0x96, 0xe2, 0x4a, 0xcf, 0x4f, 0xcf, 0x87,
+ 0xc8, 0x4b, 0x71, 0xe6, 0x25, 0x16, 0x40, 0x99, 0xdc, 0x10, 0x75, 0x60, 0x8e, 0xd2, 0x3f, 0x46,
+ 0x2e, 0x09, 0x97, 0xc4, 0x92, 0xc4, 0x82, 0x9c, 0xc4, 0xbc, 0xd4, 0x60, 0xa8, 0xd9, 0x2e, 0x10,
+ 0xa3, 0x85, 0xea, 0xb8, 0xc4, 0x13, 0x0b, 0x0a, 0xe2, 0x41, 0x2a, 0x53, 0x93, 0x4b, 0xe2, 0xcb,
+ 0x13, 0xd3, 0x60, 0xb6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x69, 0xe8, 0x61, 0x5a, 0xab,
+ 0xe7, 0x58, 0x50, 0x10, 0x00, 0xd1, 0x11, 0xee, 0xe8, 0x06, 0x35, 0xca, 0x49, 0xfa, 0xd5, 0x3d,
+ 0x79, 0x5c, 0x86, 0x79, 0x30, 0x04, 0x89, 0x24, 0x22, 0x34, 0x25, 0xa6, 0xc1, 0xec, 0x8f, 0xe2,
+ 0xe2, 0x05, 0x1b, 0x0e, 0xb7, 0x95, 0x09, 0x6c, 0xab, 0x02, 0x36, 0x5b, 0xfd, 0x40, 0x7c, 0x98,
+ 0x6d, 0x82, 0xaf, 0xee, 0xc9, 0xa3, 0x6a, 0xf5, 0x60, 0x08, 0xe2, 0xc9, 0x43, 0x56, 0xc2, 0xc6,
+ 0xc5, 0x92, 0x92, 0x58, 0x92, 0xe8, 0x64, 0x7e, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c,
+ 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0x69, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7,
+ 0xea, 0x83, 0x95, 0xeb, 0x83, 0x6d, 0xd0, 0x2f, 0x4e, 0xc9, 0xd6, 0x2f, 0x33, 0xd2, 0x07, 0x87,
+ 0x99, 0x35, 0x98, 0x4c, 0x62, 0x03, 0x53, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xcc,
+ 0xb1, 0x3e, 0x95, 0x01, 0x00, 0x00,
+}
+
+func (m *DataplaneSoftwareDetails) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataplaneSoftwareDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Data != nil {
+ {
+ size := m.Data.Size()
+ i -= size
+ if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataplaneSoftwareDetails_AppProtectWafDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareDetails_AppProtectWafDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AppProtectWafDetails != nil {
+ {
+ size, err := m.AppProtectWafDetails.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDpSoftwareDetails(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *DataplaneSoftwareDetails_NginxDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataplaneSoftwareDetails_NginxDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NginxDetails != nil {
+ {
+ size, err := m.NginxDetails.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDpSoftwareDetails(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func encodeVarintDpSoftwareDetails(dAtA []byte, offset int, v uint64) int {
+ offset -= sovDpSoftwareDetails(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *DataplaneSoftwareDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Data != nil {
+ n += m.Data.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DataplaneSoftwareDetails_AppProtectWafDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AppProtectWafDetails != nil {
+ l = m.AppProtectWafDetails.Size()
+ n += 1 + l + sovDpSoftwareDetails(uint64(l))
+ }
+ return n
+}
+func (m *DataplaneSoftwareDetails_NginxDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NginxDetails != nil {
+ l = m.NginxDetails.Size()
+ n += 1 + l + sovDpSoftwareDetails(uint64(l))
+ }
+ return n
+}
+
+func sovDpSoftwareDetails(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozDpSoftwareDetails(x uint64) (n int) {
+ return sovDpSoftwareDetails(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *DataplaneSoftwareDetails) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DataplaneSoftwareDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DataplaneSoftwareDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AppProtectWafDetails", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDpSoftwareDetails
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthDpSoftwareDetails
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &AppProtectWAFDetails{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &DataplaneSoftwareDetails_AppProtectWafDetails{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxDetails", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthDpSoftwareDetails
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthDpSoftwareDetails
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &NginxDetails{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &DataplaneSoftwareDetails_NginxDetails{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipDpSoftwareDetails(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthDpSoftwareDetails
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipDpSoftwareDetails(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowDpSoftwareDetails
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthDpSoftwareDetails
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupDpSoftwareDetails
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthDpSoftwareDetails
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthDpSoftwareDetails = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDpSoftwareDetails = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupDpSoftwareDetails = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.proto
new file mode 100644
index 000000000..891a57266
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/dp_software_details.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "gogo.proto";
+import "nap.proto";
+import "nginx.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents dataplane software details which contains details for additional software running on the dataplane that pertains to NGINX Agent
+message DataplaneSoftwareDetails {
+ oneof data {
+ // App Protect WAF software details
+ AppProtectWAFDetails app_protect_waf_details = 1 [(gogoproto.jsontag) = "app_protect_waf_details"];
+ // NGINX software details
+ NginxDetails nginx_details = 2 [(gogoproto.jsontag) = "nginx_details"];
+ }
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
new file mode 100644
index 000000000..e054b5c00
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
@@ -0,0 +1,4777 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: event.proto
+
+package f5_nginx_agent_sdk_events
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ common "github.com/nginx/agent/sdk/v2/proto/common"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Represents the metadata for an event
+type Metadata struct {
+ // Module is the process that generate the event
+ Module string `protobuf:"bytes,1,opt,name=Module,proto3" json:"module"`
+ // UUID is a unique identifier for each event
+ UUID string `protobuf:"bytes,2,opt,name=UUID,proto3" json:"uuid"`
+ // CorrelationID is an ID used by the producer of the message to track the flow of events
+ CorrelationID string `protobuf:"bytes,3,opt,name=CorrelationID,proto3" json:"correlation_id"`
+ // Timestamp defines the time of event generation
+ Timestamp *types.Timestamp `protobuf:"bytes,4,opt,name=Timestamp,proto3" json:"timestamp"`
+ // EventLevel defines the criticality of event
+ EventLevel string `protobuf:"bytes,5,opt,name=EventLevel,proto3" json:"event_level"`
+ // Type is used to identify the event type for further processing
+ Type string `protobuf:"bytes,6,opt,name=Type,proto3" json:"type"`
+ // Category is used for classifying the event type into a higher level entity
+ Category string `protobuf:"bytes,7,opt,name=Category,proto3" json:"category"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{0}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *Metadata) GetModule() string {
+ if m != nil {
+ return m.Module
+ }
+ return ""
+}
+
+func (m *Metadata) GetUUID() string {
+ if m != nil {
+ return m.UUID
+ }
+ return ""
+}
+
+func (m *Metadata) GetCorrelationID() string {
+ if m != nil {
+ return m.CorrelationID
+ }
+ return ""
+}
+
+func (m *Metadata) GetTimestamp() *types.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *Metadata) GetEventLevel() string {
+ if m != nil {
+ return m.EventLevel
+ }
+ return ""
+}
+
+func (m *Metadata) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *Metadata) GetCategory() string {
+ if m != nil {
+ return m.Category
+ }
+ return ""
+}
+
+// Represents an event
+type Event struct {
+ // Event metadata
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=Metadata,proto3" json:"metadata"`
+ // Types that are valid to be assigned to Data:
+ //
+ // *Event_ActivityEvent
+ // *Event_SecurityViolationEvent
+ Data isEvent_Data `protobuf_oneof:"data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{1}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+ return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+ xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+type isEvent_Data interface {
+ isEvent_Data()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Event_ActivityEvent struct {
+ ActivityEvent *ActivityEvent `protobuf:"bytes,2,opt,name=ActivityEvent,proto3,oneof" json:"activity_event"`
+}
+type Event_SecurityViolationEvent struct {
+ SecurityViolationEvent *SecurityViolationEvent `protobuf:"bytes,3,opt,name=SecurityViolationEvent,proto3,oneof" json:"security_violation_event"`
+}
+
+func (*Event_ActivityEvent) isEvent_Data() {}
+func (*Event_SecurityViolationEvent) isEvent_Data() {}
+
+func (m *Event) GetData() isEvent_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *Event) GetMetadata() *Metadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+func (m *Event) GetActivityEvent() *ActivityEvent {
+ if x, ok := m.GetData().(*Event_ActivityEvent); ok {
+ return x.ActivityEvent
+ }
+ return nil
+}
+
+func (m *Event) GetSecurityViolationEvent() *SecurityViolationEvent {
+ if x, ok := m.GetData().(*Event_SecurityViolationEvent); ok {
+ return x.SecurityViolationEvent
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Event) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Event_ActivityEvent)(nil),
+ (*Event_SecurityViolationEvent)(nil),
+ }
+}
+
+// Represents an event report
+type EventReport struct {
+ // Array of events
+ Events []*Event `protobuf:"bytes,1,rep,name=Events,proto3" json:"events"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EventReport) Reset() { *m = EventReport{} }
+func (m *EventReport) String() string { return proto.CompactTextString(m) }
+func (*EventReport) ProtoMessage() {}
+func (*EventReport) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{2}
+}
+func (m *EventReport) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EventReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EventReport.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EventReport) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EventReport.Merge(m, src)
+}
+func (m *EventReport) XXX_Size() int {
+ return m.Size()
+}
+func (m *EventReport) XXX_DiscardUnknown() {
+ xxx_messageInfo_EventReport.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventReport proto.InternalMessageInfo
+
+func (m *EventReport) GetEvents() []*Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+// Represents an activity event
+type ActivityEvent struct {
+ // Activtiy event message
+ Message string `protobuf:"bytes,1,opt,name=Message,proto3" json:"message"`
+ // Array of dimensions
+ Dimensions []*common.Dimension `protobuf:"bytes,2,rep,name=Dimensions,proto3" json:"dimensions"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ActivityEvent) Reset() { *m = ActivityEvent{} }
+func (m *ActivityEvent) String() string { return proto.CompactTextString(m) }
+func (*ActivityEvent) ProtoMessage() {}
+func (*ActivityEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{3}
+}
+func (m *ActivityEvent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ActivityEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ActivityEvent.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ActivityEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ActivityEvent.Merge(m, src)
+}
+func (m *ActivityEvent) XXX_Size() int {
+ return m.Size()
+}
+func (m *ActivityEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_ActivityEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ActivityEvent proto.InternalMessageInfo
+
+func (m *ActivityEvent) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *ActivityEvent) GetDimensions() []*common.Dimension {
+ if m != nil {
+ return m.Dimensions
+ }
+ return nil
+}
+
+// Represents a security violation that is emitted by the agent
+type SecurityViolationEvent struct {
+ PolicyName string `protobuf:"bytes,1,opt,name=PolicyName,proto3" json:"policy_name"`
+ SupportID string `protobuf:"bytes,2,opt,name=SupportID,proto3" json:"support_id"`
+ Outcome string `protobuf:"bytes,3,opt,name=Outcome,proto3" json:"outcome"`
+ OutcomeReason string `protobuf:"bytes,4,opt,name=OutcomeReason,proto3" json:"outcome_reason"`
+ BlockingExceptionReason string `protobuf:"bytes,5,opt,name=BlockingExceptionReason,proto3" json:"blocking_exception_reason"`
+ Method string `protobuf:"bytes,6,opt,name=Method,proto3" json:"method"`
+ Protocol string `protobuf:"bytes,7,opt,name=Protocol,proto3" json:"protocol"`
+ XForwardedForHeaderValue string `protobuf:"bytes,8,opt,name=XForwardedForHeaderValue,proto3" json:"xff_header_value"`
+ URI string `protobuf:"bytes,9,opt,name=URI,proto3" json:"uri"`
+ Request string `protobuf:"bytes,10,opt,name=Request,proto3" json:"request"`
+ IsTruncated string `protobuf:"bytes,11,opt,name=IsTruncated,proto3" json:"is_truncated"`
+ RequestStatus string `protobuf:"bytes,12,opt,name=RequestStatus,proto3" json:"request_status"`
+ ResponseCode string `protobuf:"bytes,13,opt,name=ResponseCode,proto3" json:"response_code"`
+ ServerAddr string `protobuf:"bytes,14,opt,name=ServerAddr,proto3" json:"server_addr"`
+ VSName string `protobuf:"bytes,15,opt,name=VSName,proto3" json:"vs_name"`
+ RemoteAddr string `protobuf:"bytes,16,opt,name=RemoteAddr,proto3" json:"remote_addr"`
+ RemotePort string `protobuf:"bytes,17,opt,name=RemotePort,proto3" json:"destination_port"`
+ ServerPort string `protobuf:"bytes,18,opt,name=ServerPort,proto3" json:"server_port"`
+ Violations string `protobuf:"bytes,19,opt,name=Violations,proto3" json:"violations"`
+ SubViolations string `protobuf:"bytes,20,opt,name=SubViolations,proto3" json:"sub_violations"`
+ ViolationRating string `protobuf:"bytes,21,opt,name=ViolationRating,proto3" json:"violation_rating"`
+ SigSetNames string `protobuf:"bytes,22,opt,name=SigSetNames,proto3" json:"sig_set_names"`
+ SigCVEs string `protobuf:"bytes,23,opt,name=SigCVEs,proto3" json:"sig_cves"`
+ ClientClass string `protobuf:"bytes,24,opt,name=ClientClass,proto3" json:"client_class"`
+ ClientApplication string `protobuf:"bytes,25,opt,name=ClientApplication,proto3" json:"client_application"`
+ ClientApplicationVersion string `protobuf:"bytes,26,opt,name=ClientApplicationVersion,proto3" json:"client_application_version"`
+ Severity string `protobuf:"bytes,27,opt,name=Severity,proto3" json:"severity"`
+ ThreatCampaignNames string `protobuf:"bytes,28,opt,name=ThreatCampaignNames,proto3" json:"threat_campaign_names"`
+ BotAnomalies string `protobuf:"bytes,29,opt,name=BotAnomalies,proto3" json:"bot_anomalies"`
+ BotCategory string `protobuf:"bytes,30,opt,name=BotCategory,proto3" json:"bot_category"`
+ EnforcedBotAnomalies string `protobuf:"bytes,31,opt,name=EnforcedBotAnomalies,proto3" json:"enforced_bot_anomalies"`
+ BotSignatureName string `protobuf:"bytes,32,opt,name=BotSignatureName,proto3" json:"bot_signature_name"`
+ ViolationContexts string `protobuf:"bytes,33,opt,name=ViolationContexts,proto3" json:"violation_contexts"`
+ ViolationsData []*ViolationData `protobuf:"bytes,34,rep,name=ViolationsData,proto3" json:"violations_data"`
+ SystemID string `protobuf:"bytes,35,opt,name=SystemID,proto3" json:"system_id"`
+ InstanceTags string `protobuf:"bytes,36,opt,name=InstanceTags,proto3" json:"instance_tags"`
+ InstanceGroup string `protobuf:"bytes,37,opt,name=InstanceGroup,proto3" json:"instance_group"`
+ DisplayName string `protobuf:"bytes,38,opt,name=DisplayName,proto3" json:"display_name"`
+ NginxID string `protobuf:"bytes,39,opt,name=NginxID,proto3" json:"nginx_id"`
+ ParentHostname string `protobuf:"bytes,40,opt,name=ParentHostname,proto3" json:"parent_hostname"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SecurityViolationEvent) Reset() { *m = SecurityViolationEvent{} }
+func (m *SecurityViolationEvent) String() string { return proto.CompactTextString(m) }
+func (*SecurityViolationEvent) ProtoMessage() {}
+func (*SecurityViolationEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{4}
+}
+func (m *SecurityViolationEvent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecurityViolationEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SecurityViolationEvent.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SecurityViolationEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecurityViolationEvent.Merge(m, src)
+}
+func (m *SecurityViolationEvent) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecurityViolationEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecurityViolationEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityViolationEvent proto.InternalMessageInfo
+
+func (m *SecurityViolationEvent) GetPolicyName() string {
+ if m != nil {
+ return m.PolicyName
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetSupportID() string {
+ if m != nil {
+ return m.SupportID
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetOutcome() string {
+ if m != nil {
+ return m.Outcome
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetOutcomeReason() string {
+ if m != nil {
+ return m.OutcomeReason
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetBlockingExceptionReason() string {
+ if m != nil {
+ return m.BlockingExceptionReason
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetMethod() string {
+ if m != nil {
+ return m.Method
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetProtocol() string {
+ if m != nil {
+ return m.Protocol
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetXForwardedForHeaderValue() string {
+ if m != nil {
+ return m.XForwardedForHeaderValue
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetURI() string {
+ if m != nil {
+ return m.URI
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetRequest() string {
+ if m != nil {
+ return m.Request
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetIsTruncated() string {
+ if m != nil {
+ return m.IsTruncated
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetRequestStatus() string {
+ if m != nil {
+ return m.RequestStatus
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetResponseCode() string {
+ if m != nil {
+ return m.ResponseCode
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetServerAddr() string {
+ if m != nil {
+ return m.ServerAddr
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetVSName() string {
+ if m != nil {
+ return m.VSName
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetRemoteAddr() string {
+ if m != nil {
+ return m.RemoteAddr
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetRemotePort() string {
+ if m != nil {
+ return m.RemotePort
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetServerPort() string {
+ if m != nil {
+ return m.ServerPort
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetViolations() string {
+ if m != nil {
+ return m.Violations
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetSubViolations() string {
+ if m != nil {
+ return m.SubViolations
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetViolationRating() string {
+ if m != nil {
+ return m.ViolationRating
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetSigSetNames() string {
+ if m != nil {
+ return m.SigSetNames
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetSigCVEs() string {
+ if m != nil {
+ return m.SigCVEs
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetClientClass() string {
+ if m != nil {
+ return m.ClientClass
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetClientApplication() string {
+ if m != nil {
+ return m.ClientApplication
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetClientApplicationVersion() string {
+ if m != nil {
+ return m.ClientApplicationVersion
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetSeverity() string {
+ if m != nil {
+ return m.Severity
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetThreatCampaignNames() string {
+ if m != nil {
+ return m.ThreatCampaignNames
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetBotAnomalies() string {
+ if m != nil {
+ return m.BotAnomalies
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetBotCategory() string {
+ if m != nil {
+ return m.BotCategory
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetEnforcedBotAnomalies() string {
+ if m != nil {
+ return m.EnforcedBotAnomalies
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetBotSignatureName() string {
+ if m != nil {
+ return m.BotSignatureName
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetViolationContexts() string {
+ if m != nil {
+ return m.ViolationContexts
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetViolationsData() []*ViolationData {
+ if m != nil {
+ return m.ViolationsData
+ }
+ return nil
+}
+
+func (m *SecurityViolationEvent) GetSystemID() string {
+ if m != nil {
+ return m.SystemID
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetInstanceTags() string {
+ if m != nil {
+ return m.InstanceTags
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetInstanceGroup() string {
+ if m != nil {
+ return m.InstanceGroup
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetNginxID() string {
+ if m != nil {
+ return m.NginxID
+ }
+ return ""
+}
+
+func (m *SecurityViolationEvent) GetParentHostname() string {
+ if m != nil {
+ return m.ParentHostname
+ }
+ return ""
+}
+
+type SignatureData struct {
+ ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"sig_data_id"`
+ BlockingMask string `protobuf:"bytes,2,opt,name=BlockingMask,proto3" json:"sig_data_blocking_mask"`
+ Buffer string `protobuf:"bytes,3,opt,name=Buffer,proto3" json:"sig_data_buffer"`
+ Offset string `protobuf:"bytes,4,opt,name=Offset,proto3" json:"sig_data_offset"`
+ Length string `protobuf:"bytes,5,opt,name=Length,proto3" json:"sig_data_length"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SignatureData) Reset() { *m = SignatureData{} }
+func (m *SignatureData) String() string { return proto.CompactTextString(m) }
+func (*SignatureData) ProtoMessage() {}
+func (*SignatureData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{5}
+}
+func (m *SignatureData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SignatureData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SignatureData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureData.Merge(m, src)
+}
+func (m *SignatureData) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureData) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureData proto.InternalMessageInfo
+
+func (m *SignatureData) GetID() string {
+ if m != nil {
+ return m.ID
+ }
+ return ""
+}
+
+func (m *SignatureData) GetBlockingMask() string {
+ if m != nil {
+ return m.BlockingMask
+ }
+ return ""
+}
+
+func (m *SignatureData) GetBuffer() string {
+ if m != nil {
+ return m.Buffer
+ }
+ return ""
+}
+
+func (m *SignatureData) GetOffset() string {
+ if m != nil {
+ return m.Offset
+ }
+ return ""
+}
+
+func (m *SignatureData) GetLength() string {
+ if m != nil {
+ return m.Length
+ }
+ return ""
+}
+
+type ContextData struct {
+ Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"parameter_data_name"`
+ Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"parameter_data_value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ContextData) Reset() { *m = ContextData{} }
+func (m *ContextData) String() string { return proto.CompactTextString(m) }
+func (*ContextData) ProtoMessage() {}
+func (*ContextData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{6}
+}
+func (m *ContextData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ContextData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ContextData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ContextData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ContextData.Merge(m, src)
+}
+func (m *ContextData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ContextData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ContextData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContextData proto.InternalMessageInfo
+
+func (m *ContextData) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ContextData) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+type ViolationData struct {
+ Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"violation_data_name"`
+ Context string `protobuf:"bytes,2,opt,name=Context,proto3" json:"violation_data_context"`
+ ContextData *ContextData `protobuf:"bytes,3,opt,name=ContextData,proto3" json:"violation_data_context_data"`
+ Signatures []*SignatureData `protobuf:"bytes,4,rep,name=Signatures,proto3" json:"violation_data_signatures"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ViolationData) Reset() { *m = ViolationData{} }
+func (m *ViolationData) String() string { return proto.CompactTextString(m) }
+func (*ViolationData) ProtoMessage() {}
+func (*ViolationData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d17a9d3f0ddf27e, []int{7}
+}
+func (m *ViolationData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ViolationData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ViolationData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ViolationData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ViolationData.Merge(m, src)
+}
+func (m *ViolationData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ViolationData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ViolationData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ViolationData proto.InternalMessageInfo
+
+func (m *ViolationData) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ViolationData) GetContext() string {
+ if m != nil {
+ return m.Context
+ }
+ return ""
+}
+
+func (m *ViolationData) GetContextData() *ContextData {
+ if m != nil {
+ return m.ContextData
+ }
+ return nil
+}
+
+func (m *ViolationData) GetSignatures() []*SignatureData {
+ if m != nil {
+ return m.Signatures
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Metadata)(nil), "f5.nginx.agent.sdk.events.Metadata")
+ proto.RegisterType((*Event)(nil), "f5.nginx.agent.sdk.events.Event")
+ proto.RegisterType((*EventReport)(nil), "f5.nginx.agent.sdk.events.EventReport")
+ proto.RegisterType((*ActivityEvent)(nil), "f5.nginx.agent.sdk.events.ActivityEvent")
+ proto.RegisterType((*SecurityViolationEvent)(nil), "f5.nginx.agent.sdk.events.SecurityViolationEvent")
+ proto.RegisterType((*SignatureData)(nil), "f5.nginx.agent.sdk.events.SignatureData")
+ proto.RegisterType((*ContextData)(nil), "f5.nginx.agent.sdk.events.ContextData")
+ proto.RegisterType((*ViolationData)(nil), "f5.nginx.agent.sdk.events.ViolationData")
+}
+
+func init() { proto.RegisterFile("event.proto", fileDescriptor_2d17a9d3f0ddf27e) }
+
+var fileDescriptor_2d17a9d3f0ddf27e = []byte{
+ // 1647 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x57, 0xdd, 0x6e, 0xdc, 0xc6,
+ 0x15, 0xee, 0xca, 0xb2, 0x7e, 0x66, 0x25, 0xd9, 0x1e, 0x39, 0xf6, 0x48, 0x71, 0x44, 0x75, 0x9d,
+ 0xb8, 0x2a, 0x52, 0xec, 0xa2, 0x4a, 0x02, 0x14, 0x08, 0x10, 0xc0, 0xab, 0x75, 0x92, 0x45, 0x23,
+ 0x47, 0x98, 0x95, 0xd5, 0x22, 0x37, 0xc4, 0x88, 0x9c, 0xa5, 0x58, 0xed, 0x72, 0x58, 0xce, 0x70,
+ 0x6b, 0x3d, 0x42, 0x81, 0x3e, 0x57, 0x91, 0xcb, 0xdc, 0x17, 0x20, 0x0a, 0x5f, 0xf2, 0x05, 0x7a,
+ 0x5b, 0xcc, 0x99, 0x21, 0x87, 0xd4, 0x5f, 0xae, 0x6c, 0x9e, 0xef, 0xfb, 0xce, 0x99, 0x39, 0x3f,
+ 0xb3, 0x47, 0xa8, 0xcb, 0x17, 0x3c, 0x51, 0xfd, 0x34, 0x13, 0x4a, 0xe0, 0x9d, 0xe9, 0x57, 0xfd,
+ 0x24, 0x8a, 0x93, 0xf7, 0x7d, 0x16, 0x69, 0xab, 0x0c, 0x2f, 0xfb, 0x80, 0xcb, 0xdd, 0xed, 0x40,
+ 0xcc, 0xe7, 0x22, 0x19, 0x98, 0x7f, 0x0c, 0x7f, 0x17, 0x45, 0x22, 0x12, 0xf6, 0xff, 0x5e, 0x24,
+ 0x44, 0x34, 0xe3, 0x03, 0xf8, 0x3a, 0xcf, 0xa7, 0x03, 0x15, 0xcf, 0xb9, 0x54, 0x6c, 0x9e, 0x1a,
+ 0x42, 0xef, 0xe7, 0x25, 0xb4, 0x76, 0xcc, 0x15, 0x0b, 0x99, 0x62, 0xb8, 0x87, 0x56, 0x8e, 0x45,
+ 0x98, 0xcf, 0x38, 0xe9, 0xec, 0x77, 0x0e, 0xd6, 0x87, 0xa8, 0x2c, 0xbc, 0x95, 0x39, 0x58, 0xa8,
+ 0x45, 0xf0, 0x0b, 0xb4, 0xfc, 0xee, 0xdd, 0x78, 0x44, 0x96, 0x80, 0xb1, 0x56, 0x16, 0xde, 0x72,
+ 0x9e, 0xc7, 0x21, 0x05, 0x2b, 0xfe, 0x13, 0xda, 0x3c, 0x12, 0x59, 0xc6, 0x67, 0x4c, 0xc5, 0x22,
+ 0x19, 0x8f, 0xc8, 0x03, 0xa0, 0xe1, 0xb2, 0xf0, 0xb6, 0x02, 0x07, 0xf8, 0x71, 0x48, 0xdb, 0x44,
+ 0xfc, 0x1d, 0x5a, 0x3f, 0xad, 0xce, 0x46, 0x96, 0xf7, 0x3b, 0x07, 0xdd, 0xc3, 0xdd, 0xbe, 0x39,
+ 0x7d, 0xbf, 0x3a, 0x7d, 0xbf, 0x66, 0x0c, 0x37, 0xcb, 0xc2, 0x5b, 0xaf, 0x2f, 0x43, 0x9d, 0x16,
+ 0x0f, 0x10, 0x7a, 0xa3, 0xb3, 0xf3, 0x03, 0x5f, 0xf0, 0x19, 0x79, 0x08, 0xf1, 0x1f, 0x95, 0x85,
+ 0x67, 0x72, 0xea, 0xcf, 0xb4, 0x99, 0x36, 0x28, 0xfa, 0x46, 0xa7, 0x57, 0x29, 0x27, 0x2b, 0xee,
+ 0x46, 0xea, 0x2a, 0xe5, 0x14, 0xac, 0xf8, 0x00, 0xad, 0x1d, 0x31, 0xc5, 0x23, 0x91, 0x5d, 0x91,
+ 0x55, 0x60, 0x6c, 0x94, 0x85, 0xb7, 0x16, 0x58, 0x1b, 0xad, 0xd1, 0xde, 0x2f, 0x4b, 0xe8, 0x21,
+ 0xb8, 0xc5, 0xc7, 0x2e, 0xa7, 0x90, 0xc9, 0xee, 0xe1, 0xcb, 0xfe, 0x9d, 0x45, 0xec, 0x57, 0x54,
+ 0xe3, 0x78, 0x6e, 0xbf, 0xa8, 0x2b, 0x4b, 0x88, 0x36, 0x5f, 0x07, 0x2a, 0x5e, 0xc4, 0xea, 0x0a,
+ 0xfc, 0x43, 0xee, 0xbb, 0x87, 0x07, 0xf7, 0xf8, 0x6c, 0xf1, 0x4d, 0xfa, 0x99, 0x35, 0xf9, 0x40,
+ 0xf9, 0xfe, 0x37, 0xb4, 0xed, 0x14, 0xff, 0xb3, 0x83, 0x9e, 0x4d, 0x78, 0x90, 0x67, 0xb1, 0xba,
+ 0x3a, 0x8b, 0x85, 0x29, 0x8c, 0x89, 0xf7, 0x00, 0xe2, 0xfd, 0xf1, 0x9e, 0x78, 0xb7, 0x0b, 0x87,
+ 0x2f, 0xca, 0xc2, 0x23, 0xd2, 0x62, 0xfe, 0xa2, 0x02, 0xeb, 0x23, 0xdc, 0x11, 0x70, 0xb8, 0x82,
+ 0x96, 0xf5, 0xcd, 0x7b, 0x13, 0xd4, 0x05, 0x03, 0xe5, 0xa9, 0xc8, 0x14, 0x1e, 0xa1, 0x15, 0xf8,
+ 0x94, 0xa4, 0xb3, 0xff, 0xe0, 0xa0, 0x7b, 0xb8, 0x7f, 0xcf, 0x89, 0x8c, 0x23, 0xe8, 0x60, 0x63,
+ 0xa1, 0x56, 0xdb, 0xfb, 0x57, 0xe7, 0x5a, 0x3e, 0xf1, 0x67, 0x68, 0xf5, 0x98, 0x4b, 0xc9, 0xa2,
+ 0xaa, 0xf1, 0xbb, 0x65, 0xe1, 0xad, 0xce, 0x8d, 0x89, 0x56, 0x18, 0x3e, 0x45, 0x68, 0x14, 0xcf,
+ 0x79, 0x22, 0x63, 0x91, 0x48, 0xb2, 0x04, 0x47, 0xf8, 0xf4, 0xb6, 0x23, 0xd8, 0x71, 0xac, 0xc9,
+ 0xc3, 0xad, 0xb2, 0xf0, 0x50, 0x58, 0x6b, 0x69, 0xc3, 0x4f, 0xef, 0x3f, 0x4f, 0xee, 0xca, 0xbb,
+ 0x6e, 0xe5, 0x13, 0x31, 0x8b, 0x83, 0xab, 0xb7, 0x6c, 0x5e, 0x1d, 0x0d, 0x5a, 0x39, 0x05, 0xab,
+ 0x9f, 0xb0, 0x39, 0xa7, 0x0d, 0x0a, 0xfe, 0x03, 0x5a, 0x9f, 0xe4, 0xa9, 0xce, 0x55, 0x3d, 0xa1,
+ 0x10, 0x5a, 0x1a, 0xa3, 0x1e, 0x3b, 0x47, 0xd0, 0xd7, 0xfe, 0x31, 0x57, 0x81, 0x98, 0x73, 0x3b,
+ 0xa6, 0x70, 0x6d, 0x61, 0x4c, 0xb4, 0xc2, 0xf4, 0x4c, 0xdb, 0xff, 0x52, 0xce, 0xa4, 0x48, 0x60,
+ 0x3a, 0xed, 0x4c, 0x5b, 0xb2, 0x9f, 0x01, 0x42, 0xdb, 0x44, 0xfc, 0x17, 0xf4, 0x7c, 0x38, 0x13,
+ 0xc1, 0x65, 0x9c, 0x44, 0x6f, 0xde, 0x07, 0x3c, 0xd5, 0x37, 0xb3, 0x3e, 0xcc, 0x5c, 0x7e, 0x52,
+ 0x16, 0xde, 0xce, 0xb9, 0xa5, 0xf8, 0xbc, 0xe2, 0x54, 0xee, 0xee, 0x52, 0xc3, 0x43, 0xc5, 0xd5,
+ 0x85, 0x08, 0xed, 0xd0, 0x9a, 0x87, 0x0a, 0x2c, 0xd4, 0x22, 0x7a, 0x70, 0x4f, 0xf4, 0xbb, 0x11,
+ 0x88, 0x59, 0x73, 0x70, 0x53, 0x6b, 0xa3, 0x35, 0x8a, 0x4f, 0x10, 0xf9, 0xeb, 0xb7, 0x22, 0xfb,
+ 0x07, 0xcb, 0x42, 0x1e, 0x7e, 0x2b, 0xb2, 0xef, 0x39, 0x0b, 0x79, 0x76, 0xc6, 0x66, 0x39, 0x27,
+ 0x6b, 0xa0, 0x7c, 0x5a, 0x16, 0xde, 0xe3, 0xf7, 0xd3, 0xa9, 0x7f, 0x01, 0x90, 0xbf, 0xd0, 0x18,
+ 0xbd, 0x53, 0x85, 0x77, 0xd0, 0x83, 0x77, 0x74, 0x4c, 0xd6, 0x41, 0xbc, 0x5a, 0x16, 0xde, 0x83,
+ 0x3c, 0x8b, 0xa9, 0xb6, 0xe9, 0xa4, 0x53, 0xfe, 0xf7, 0x9c, 0x4b, 0x45, 0x90, 0x4b, 0x7a, 0x66,
+ 0x4c, 0xb4, 0xc2, 0xf0, 0x21, 0xea, 0x8e, 0xe5, 0x69, 0x96, 0x27, 0xfa, 0xa1, 0x09, 0x49, 0x17,
+ 0xa8, 0x8f, 0xcb, 0xc2, 0xdb, 0x88, 0xa5, 0xaf, 0x2a, 0x3b, 0x6d, 0x92, 0x74, 0xa1, 0xac, 0x7c,
+ 0xa2, 0x98, 0xca, 0x25, 0xd9, 0x70, 0x85, 0xb2, 0x01, 0x7c, 0x09, 0x08, 0x6d, 0x13, 0xf1, 0x57,
+ 0x68, 0x83, 0x72, 0x99, 0x8a, 0x44, 0xf2, 0x23, 0x11, 0x72, 0xb2, 0x09, 0xc2, 0x27, 0x65, 0xe1,
+ 0x6d, 0x66, 0xd6, 0xee, 0x07, 0x22, 0xe4, 0xb4, 0x45, 0xd3, 0xfd, 0x39, 0xe1, 0xd9, 0x82, 0x67,
+ 0xaf, 0xc3, 0x30, 0x23, 0x5b, 0xae, 0x3f, 0x25, 0x58, 0x7d, 0x16, 0x86, 0x19, 0x6d, 0x50, 0xf0,
+ 0x4b, 0xb4, 0x72, 0x36, 0x81, 0x66, 0x7e, 0xe4, 0xee, 0xbe, 0x90, 0xa6, 0x91, 0x2d, 0xa4, 0xbd,
+ 0x52, 0x3e, 0x17, 0x8a, 0x83, 0xd7, 0xc7, 0xce, 0x6b, 0x06, 0x56, 0xeb, 0xd5, 0x51, 0xf0, 0x97,
+ 0x95, 0xe0, 0x44, 0x64, 0x8a, 0x3c, 0x71, 0x15, 0x0b, 0xb9, 0x54, 0x71, 0x62, 0x9e, 0x1c, 0xdd,
+ 0xf3, 0xb4, 0xc1, 0x73, 0x87, 0x07, 0x15, 0xbe, 0x71, 0x78, 0x23, 0x70, 0x14, 0xdc, 0x47, 0xa8,
+ 0x9e, 0x4f, 0x49, 0xb6, 0xdd, 0x74, 0xd5, 0xef, 0x9a, 0xa4, 0x0d, 0x86, 0x2e, 0xc7, 0x24, 0x3f,
+ 0x6f, 0x48, 0x9e, 0xba, 0x72, 0xc8, 0xfc, 0xdc, 0x6f, 0xc8, 0xda, 0x44, 0xfc, 0x0d, 0x7a, 0x54,
+ 0x7f, 0x51, 0xa6, 0xe2, 0x24, 0x22, 0x1f, 0xb9, 0x5b, 0xb9, 0x67, 0x34, 0x03, 0x8c, 0x5e, 0x27,
+ 0xe3, 0x2f, 0x50, 0x77, 0x12, 0x47, 0x13, 0xae, 0x74, 0x3e, 0x25, 0x79, 0xe6, 0xaa, 0x29, 0xe3,
+ 0xc8, 0x97, 0x5c, 0x41, 0xc2, 0x25, 0x6d, 0xb2, 0xf0, 0x2b, 0xb4, 0x3a, 0x89, 0xa3, 0xa3, 0xb3,
+ 0x37, 0x92, 0x3c, 0x77, 0xe3, 0xa2, 0x05, 0xc1, 0x82, 0x4b, 0x5a, 0x81, 0xba, 0x33, 0x8f, 0x66,
+ 0x31, 0x4f, 0xd4, 0xd1, 0x8c, 0x49, 0x49, 0x88, 0xeb, 0xcc, 0x00, 0xcc, 0x7e, 0xa0, 0xed, 0xb4,
+ 0x49, 0xc2, 0x23, 0xf4, 0xc4, 0x7c, 0xbe, 0x4e, 0xd3, 0x59, 0x1c, 0xc0, 0x59, 0xc9, 0x0e, 0x28,
+ 0x9f, 0x95, 0x85, 0x87, 0xad, 0x92, 0x39, 0x94, 0xde, 0x14, 0xe0, 0x9f, 0x10, 0xb9, 0x61, 0x3c,
+ 0xe3, 0x99, 0x7e, 0x46, 0xc9, 0x2e, 0x38, 0xdb, 0x2b, 0x0b, 0x6f, 0xf7, 0xa6, 0x33, 0x7f, 0x61,
+ 0x58, 0xf4, 0x4e, 0xbd, 0x7e, 0x2d, 0x26, 0x7c, 0xc1, 0xf5, 0x23, 0x4c, 0x3e, 0x6e, 0x5c, 0xdf,
+ 0xda, 0x68, 0x8d, 0xe2, 0x3f, 0xa3, 0xed, 0xd3, 0x8b, 0x8c, 0x33, 0x75, 0xc4, 0xe6, 0x29, 0x8b,
+ 0xa3, 0xc4, 0x24, 0xf9, 0x05, 0x88, 0x76, 0xca, 0xc2, 0xfb, 0x48, 0x01, 0xec, 0x07, 0x16, 0xb7,
+ 0xc9, 0xbe, 0x4d, 0xa5, 0x07, 0x6f, 0x28, 0xd4, 0xeb, 0x44, 0xcc, 0xd9, 0x2c, 0xe6, 0x92, 0x7c,
+ 0xe2, 0x4a, 0x75, 0x2e, 0x94, 0xcf, 0x2a, 0x80, 0xb6, 0x68, 0xba, 0x06, 0x43, 0xa1, 0xea, 0xbd,
+ 0x64, 0xcf, 0xd5, 0x40, 0xab, 0xea, 0xdd, 0xa4, 0x49, 0xc2, 0x6f, 0xd1, 0xd3, 0x37, 0xc9, 0x54,
+ 0x64, 0x01, 0x0f, 0x5b, 0x21, 0x3d, 0x10, 0xef, 0x96, 0x85, 0xf7, 0x8c, 0x5b, 0xdc, 0x6f, 0xc7,
+ 0xbe, 0x55, 0x87, 0x87, 0xe8, 0xf1, 0x50, 0xa8, 0x49, 0x1c, 0x25, 0x4c, 0xe5, 0x19, 0x87, 0xa9,
+ 0xde, 0x77, 0x25, 0xd5, 0x2e, 0x64, 0x05, 0x9a, 0x01, 0xbf, 0xc1, 0xd7, 0x7d, 0x51, 0xf7, 0xee,
+ 0x91, 0x48, 0x14, 0x7f, 0xaf, 0x24, 0xf9, 0xad, 0x73, 0xe2, 0x5a, 0x3d, 0xb0, 0x28, 0xbd, 0x29,
+ 0xc0, 0x1c, 0x6d, 0xb9, 0xe1, 0x19, 0xe9, 0xa5, 0xab, 0x07, 0xbf, 0xcd, 0xf7, 0x2d, 0x48, 0xb5,
+ 0x40, 0xf3, 0x87, 0xdb, 0x65, 0xe1, 0x3d, 0x72, 0xf3, 0xe8, 0xc3, 0x02, 0x76, 0xcd, 0x29, 0xfe,
+ 0x3d, 0x5a, 0x9b, 0x5c, 0x49, 0xc5, 0xe7, 0xe3, 0x11, 0x79, 0x09, 0x67, 0x84, 0x25, 0x54, 0x82,
+ 0x4d, 0xff, 0xb4, 0xd6, 0xb0, 0x2e, 0xeb, 0x38, 0x91, 0x8a, 0x25, 0x01, 0x3f, 0x65, 0x91, 0x24,
+ 0x9f, 0xba, 0xb2, 0xc6, 0xd6, 0xee, 0x2b, 0x16, 0x49, 0xda, 0xa2, 0xe9, 0x17, 0xa3, 0xfa, 0xfe,
+ 0x2e, 0x13, 0x79, 0x4a, 0x3e, 0x73, 0x2f, 0x46, 0xad, 0x8b, 0x34, 0x42, 0xdb, 0x44, 0xdd, 0x10,
+ 0xa3, 0x58, 0xa6, 0x33, 0x66, 0x56, 0x85, 0x57, 0xae, 0x21, 0x42, 0x63, 0x36, 0x15, 0x68, 0x92,
+ 0xf4, 0xc0, 0xbf, 0xd5, 0xc9, 0x19, 0x8f, 0xc8, 0xef, 0x5c, 0xc7, 0x43, 0xbe, 0xf4, 0x6d, 0x2a,
+ 0x10, 0x7f, 0x8d, 0xb6, 0x4e, 0x58, 0xa6, 0x17, 0x36, 0x21, 0x95, 0x76, 0x43, 0x0e, 0x80, 0x0e,
+ 0x49, 0x4b, 0x01, 0xf1, 0x2f, 0x2c, 0x44, 0xaf, 0x51, 0x7b, 0xff, 0xeb, 0xa0, 0xcd, 0xba, 0xe6,
+ 0x90, 0x46, 0x0f, 0x2d, 0x8d, 0x47, 0xcd, 0x65, 0x46, 0x3f, 0x31, 0x3a, 0xe1, 0x3a, 0xe8, 0xd2,
+ 0x78, 0x84, 0xbf, 0x41, 0x1b, 0xd5, 0xef, 0xfe, 0x31, 0x93, 0x97, 0x76, 0x8f, 0x81, 0x06, 0xad,
+ 0xa9, 0xf5, 0xce, 0x30, 0x67, 0xf2, 0x92, 0xb6, 0xf8, 0xf8, 0x73, 0xb4, 0x32, 0xcc, 0xa7, 0x53,
+ 0x9e, 0xd9, 0xad, 0x06, 0xce, 0xe9, 0x94, 0x00, 0x51, 0x4b, 0xd1, 0xe4, 0x1f, 0xa7, 0x53, 0xc9,
+ 0x95, 0xdd, 0x6a, 0xda, 0x64, 0x01, 0x10, 0xb5, 0x14, 0x4d, 0xfe, 0x81, 0x27, 0x91, 0xba, 0xb0,
+ 0xeb, 0x4b, 0x9b, 0x3c, 0x03, 0x88, 0x5a, 0x4a, 0xef, 0x6f, 0xa8, 0x6b, 0x3b, 0x14, 0xae, 0xfd,
+ 0x39, 0x5a, 0x6e, 0x6c, 0x71, 0xcf, 0xcb, 0xc2, 0xdb, 0x4e, 0x59, 0xc6, 0xe6, 0x5c, 0xf1, 0xcc,
+ 0xe8, 0x21, 0x7f, 0x40, 0xc2, 0x7d, 0xf4, 0xd0, 0xac, 0x1f, 0xe6, 0xee, 0xa4, 0x2c, 0xbc, 0xa7,
+ 0xd7, 0xd8, 0x66, 0x05, 0x31, 0xb4, 0xde, 0xbf, 0x97, 0xd0, 0x66, 0xab, 0xa3, 0x6f, 0x0b, 0xe7,
+ 0x86, 0xe9, 0x7a, 0xb8, 0x2f, 0xd1, 0xaa, 0x3d, 0x6a, 0x33, 0xd9, 0xd7, 0xf8, 0x76, 0x02, 0x69,
+ 0x45, 0xc5, 0x71, 0xeb, 0x82, 0xf6, 0x8f, 0x84, 0x57, 0xf7, 0xcc, 0x5c, 0x83, 0x3d, 0xf4, 0xca,
+ 0xc2, 0xfb, 0xf8, 0xf6, 0x08, 0x66, 0xfa, 0x5a, 0xc9, 0x8b, 0x10, 0xaa, 0x9b, 0x48, 0x92, 0xe5,
+ 0x5f, 0x9d, 0xee, 0x56, 0xc7, 0x99, 0x2d, 0xf3, 0x5a, 0xac, 0xfa, 0x69, 0x92, 0xb4, 0xe1, 0x7a,
+ 0x48, 0x7f, 0xfe, 0xb0, 0xd7, 0xf9, 0xe5, 0xc3, 0x5e, 0xe7, 0xbf, 0x1f, 0xf6, 0x3a, 0x3f, 0x8d,
+ 0xa2, 0x58, 0x5d, 0xe4, 0xe7, 0x7a, 0x9f, 0x1f, 0x40, 0xa4, 0x01, 0x44, 0x1a, 0xc8, 0xf0, 0x72,
+ 0xb0, 0x38, 0x34, 0x7f, 0x54, 0x0f, 0x4c, 0xc8, 0xaf, 0xef, 0x3c, 0xcc, 0xf9, 0x0a, 0xf0, 0xbe,
+ 0xf8, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xb3, 0x5c, 0xb5, 0xd5, 0x0f, 0x00, 0x00,
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Category) > 0 {
+ i -= len(m.Category)
+ copy(dAtA[i:], m.Category)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Category)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.EventLevel) > 0 {
+ i -= len(m.EventLevel)
+ copy(dAtA[i:], m.EventLevel)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.EventLevel)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Timestamp != nil {
+ {
+ size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.CorrelationID) > 0 {
+ i -= len(m.CorrelationID)
+ copy(dAtA[i:], m.CorrelationID)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.CorrelationID)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.UUID) > 0 {
+ i -= len(m.UUID)
+ copy(dAtA[i:], m.UUID)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.UUID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Module) > 0 {
+ i -= len(m.Module)
+ copy(dAtA[i:], m.Module)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Module)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Data != nil {
+ {
+ size := m.Data.Size()
+ i -= size
+ if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if m.Metadata != nil {
+ {
+ size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Event_ActivityEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event_ActivityEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ActivityEvent != nil {
+ {
+ size, err := m.ActivityEvent.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Event_SecurityViolationEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event_SecurityViolationEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.SecurityViolationEvent != nil {
+ {
+ size, err := m.SecurityViolationEvent.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *EventReport) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EventReport) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventReport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Events) > 0 {
+ for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ActivityEvent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ActivityEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ActivityEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Dimensions) > 0 {
+ for iNdEx := len(m.Dimensions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Dimensions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecurityViolationEvent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecurityViolationEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecurityViolationEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.ParentHostname) > 0 {
+ i -= len(m.ParentHostname)
+ copy(dAtA[i:], m.ParentHostname)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ParentHostname)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc2
+ }
+ if len(m.NginxID) > 0 {
+ i -= len(m.NginxID)
+ copy(dAtA[i:], m.NginxID)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.NginxID)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xba
+ }
+ if len(m.DisplayName) > 0 {
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb2
+ }
+ if len(m.InstanceGroup) > 0 {
+ i -= len(m.InstanceGroup)
+ copy(dAtA[i:], m.InstanceGroup)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.InstanceGroup)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xaa
+ }
+ if len(m.InstanceTags) > 0 {
+ i -= len(m.InstanceTags)
+ copy(dAtA[i:], m.InstanceTags)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.InstanceTags)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa2
+ }
+ if len(m.SystemID) > 0 {
+ i -= len(m.SystemID)
+ copy(dAtA[i:], m.SystemID)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.SystemID)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x9a
+ }
+ if len(m.ViolationsData) > 0 {
+ for iNdEx := len(m.ViolationsData) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ViolationsData[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x92
+ }
+ }
+ if len(m.ViolationContexts) > 0 {
+ i -= len(m.ViolationContexts)
+ copy(dAtA[i:], m.ViolationContexts)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ViolationContexts)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x8a
+ }
+ if len(m.BotSignatureName) > 0 {
+ i -= len(m.BotSignatureName)
+ copy(dAtA[i:], m.BotSignatureName)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.BotSignatureName)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.EnforcedBotAnomalies) > 0 {
+ i -= len(m.EnforcedBotAnomalies)
+ copy(dAtA[i:], m.EnforcedBotAnomalies)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.EnforcedBotAnomalies)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xfa
+ }
+ if len(m.BotCategory) > 0 {
+ i -= len(m.BotCategory)
+ copy(dAtA[i:], m.BotCategory)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.BotCategory)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf2
+ }
+ if len(m.BotAnomalies) > 0 {
+ i -= len(m.BotAnomalies)
+ copy(dAtA[i:], m.BotAnomalies)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.BotAnomalies)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xea
+ }
+ if len(m.ThreatCampaignNames) > 0 {
+ i -= len(m.ThreatCampaignNames)
+ copy(dAtA[i:], m.ThreatCampaignNames)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ThreatCampaignNames)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe2
+ }
+ if len(m.Severity) > 0 {
+ i -= len(m.Severity)
+ copy(dAtA[i:], m.Severity)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Severity)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xda
+ }
+ if len(m.ClientApplicationVersion) > 0 {
+ i -= len(m.ClientApplicationVersion)
+ copy(dAtA[i:], m.ClientApplicationVersion)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ClientApplicationVersion)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd2
+ }
+ if len(m.ClientApplication) > 0 {
+ i -= len(m.ClientApplication)
+ copy(dAtA[i:], m.ClientApplication)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ClientApplication)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xca
+ }
+ if len(m.ClientClass) > 0 {
+ i -= len(m.ClientClass)
+ copy(dAtA[i:], m.ClientClass)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ClientClass)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc2
+ }
+ if len(m.SigCVEs) > 0 {
+ i -= len(m.SigCVEs)
+ copy(dAtA[i:], m.SigCVEs)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.SigCVEs)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xba
+ }
+ if len(m.SigSetNames) > 0 {
+ i -= len(m.SigSetNames)
+ copy(dAtA[i:], m.SigSetNames)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.SigSetNames)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb2
+ }
+ if len(m.ViolationRating) > 0 {
+ i -= len(m.ViolationRating)
+ copy(dAtA[i:], m.ViolationRating)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ViolationRating)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xaa
+ }
+ if len(m.SubViolations) > 0 {
+ i -= len(m.SubViolations)
+ copy(dAtA[i:], m.SubViolations)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.SubViolations)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xa2
+ }
+ if len(m.Violations) > 0 {
+ i -= len(m.Violations)
+ copy(dAtA[i:], m.Violations)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Violations)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ if len(m.ServerPort) > 0 {
+ i -= len(m.ServerPort)
+ copy(dAtA[i:], m.ServerPort)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ServerPort)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if len(m.RemotePort) > 0 {
+ i -= len(m.RemotePort)
+ copy(dAtA[i:], m.RemotePort)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.RemotePort)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ if len(m.RemoteAddr) > 0 {
+ i -= len(m.RemoteAddr)
+ copy(dAtA[i:], m.RemoteAddr)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.RemoteAddr)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.VSName) > 0 {
+ i -= len(m.VSName)
+ copy(dAtA[i:], m.VSName)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.VSName)))
+ i--
+ dAtA[i] = 0x7a
+ }
+ if len(m.ServerAddr) > 0 {
+ i -= len(m.ServerAddr)
+ copy(dAtA[i:], m.ServerAddr)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ServerAddr)))
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.ResponseCode) > 0 {
+ i -= len(m.ResponseCode)
+ copy(dAtA[i:], m.ResponseCode)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.ResponseCode)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.RequestStatus) > 0 {
+ i -= len(m.RequestStatus)
+ copy(dAtA[i:], m.RequestStatus)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.RequestStatus)))
+ i--
+ dAtA[i] = 0x62
+ }
+ if len(m.IsTruncated) > 0 {
+ i -= len(m.IsTruncated)
+ copy(dAtA[i:], m.IsTruncated)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.IsTruncated)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.Request) > 0 {
+ i -= len(m.Request)
+ copy(dAtA[i:], m.Request)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Request)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.URI) > 0 {
+ i -= len(m.URI)
+ copy(dAtA[i:], m.URI)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.URI)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.XForwardedForHeaderValue) > 0 {
+ i -= len(m.XForwardedForHeaderValue)
+ copy(dAtA[i:], m.XForwardedForHeaderValue)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.XForwardedForHeaderValue)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.Protocol) > 0 {
+ i -= len(m.Protocol)
+ copy(dAtA[i:], m.Protocol)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Protocol)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Method) > 0 {
+ i -= len(m.Method)
+ copy(dAtA[i:], m.Method)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Method)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.BlockingExceptionReason) > 0 {
+ i -= len(m.BlockingExceptionReason)
+ copy(dAtA[i:], m.BlockingExceptionReason)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.BlockingExceptionReason)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.OutcomeReason) > 0 {
+ i -= len(m.OutcomeReason)
+ copy(dAtA[i:], m.OutcomeReason)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.OutcomeReason)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Outcome) > 0 {
+ i -= len(m.Outcome)
+ copy(dAtA[i:], m.Outcome)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.Outcome)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.SupportID) > 0 {
+ i -= len(m.SupportID)
+ copy(dAtA[i:], m.SupportID)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.SupportID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PolicyName) > 0 {
+ i -= len(m.PolicyName)
+ copy(dAtA[i:], m.PolicyName)
+ i = encodeVarintEvent(dAtA, i, uint64(len(m.PolicyName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA,
// writing the highest-numbered field first (gogo/protobuf convention),
// and returns the number of bytes written.
func (m *SignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Unknown fields captured during Unmarshal are re-emitted verbatim.
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Length) > 0 {
		i -= len(m.Length)
		copy(dAtA[i:], m.Length)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Length)))
		i--
		dAtA[i] = 0x2a // tag: field 5, wire type 2 (length-delimited)
	}
	if len(m.Offset) > 0 {
		i -= len(m.Offset)
		copy(dAtA[i:], m.Offset)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Offset)))
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2
	}
	if len(m.Buffer) > 0 {
		i -= len(m.Buffer)
		copy(dAtA[i:], m.Buffer)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Buffer)))
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	if len(m.BlockingMask) > 0 {
		i -= len(m.BlockingMask)
		copy(dAtA[i:], m.BlockingMask)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.BlockingMask)))
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	if len(m.ID) > 0 {
		i -= len(m.ID)
		copy(dAtA[i:], m.ID)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.ID)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
+
+func (m *ContextData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ContextData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA,
// writing the highest-numbered field first (gogo/protobuf convention),
// and returns the number of bytes written.
func (m *ContextData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Unknown fields captured during Unmarshal are re-emitted verbatim.
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2 (length-delimited)
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
+
+func (m *ViolationData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ViolationData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA,
// writing the highest-numbered field first (gogo/protobuf convention),
// and returns the number of bytes written.
func (m *ViolationData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Unknown fields captured during Unmarshal are re-emitted verbatim.
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	// Repeated field: emitted in reverse so elements end up in order
	// when read front-to-back.
	if len(m.Signatures) > 0 {
		for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintEvent(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22 // tag: field 4, wire type 2 (embedded message)
		}
	}
	if m.ContextData != nil {
		{
			size, err := m.ContextData.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintEvent(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2 (embedded message)
	}
	if len(m.Context) > 0 {
		i -= len(m.Context)
		copy(dAtA[i:], m.Context)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Context)))
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintEvent(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
+
+func encodeVarintEvent(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEvent(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Metadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Module)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.UUID)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.CorrelationID)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.Timestamp != nil {
+ l = m.Timestamp.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.EventLevel)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Category)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Event) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Metadata != nil {
+ l = m.Metadata.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.Data != nil {
+ n += m.Data.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Event_ActivityEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ActivityEvent != nil {
+ l = m.ActivityEvent.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ return n
+}
+func (m *Event_SecurityViolationEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.SecurityViolationEvent != nil {
+ l = m.SecurityViolationEvent.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ return n
+}
+func (m *EventReport) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Events) > 0 {
+ for _, e := range m.Events {
+ l = e.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ActivityEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if len(m.Dimensions) > 0 {
+ for _, e := range m.Dimensions {
+ l = e.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SecurityViolationEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PolicyName)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.SupportID)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Outcome)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.OutcomeReason)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.BlockingExceptionReason)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Method)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Protocol)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.XForwardedForHeaderValue)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.URI)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Request)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.IsTruncated)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.RequestStatus)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ResponseCode)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ServerAddr)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.VSName)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.RemoteAddr)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.RemotePort)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ServerPort)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Violations)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.SubViolations)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ViolationRating)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.SigSetNames)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.SigCVEs)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ClientClass)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ClientApplication)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ClientApplicationVersion)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Severity)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ThreatCampaignNames)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.BotAnomalies)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.BotCategory)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.EnforcedBotAnomalies)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.BotSignatureName)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ViolationContexts)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ if len(m.ViolationsData) > 0 {
+ for _, e := range m.ViolationsData {
+ l = e.Size()
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ }
+ l = len(m.SystemID)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.InstanceTags)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.InstanceGroup)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.DisplayName)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.NginxID)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ l = len(m.ParentHostname)
+ if l > 0 {
+ n += 2 + l + sovEvent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SignatureData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.BlockingMask)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Buffer)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Offset)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Length)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ContextData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ViolationData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ l = len(m.Context)
+ if l > 0 {
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if m.ContextData != nil {
+ l = m.ContextData.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ if len(m.Signatures) > 0 {
+ for _, e := range m.Signatures {
+ l = e.Size()
+ n += 1 + l + sovEvent(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
// sovEvent returns the number of bytes needed to varint-encode x:
// ceil(bitlen(x)/7), with a minimum of one byte (x|1 guarantees a
// non-zero bit length when x == 0).
func sovEvent(x uint64) (n int) {
	bitLen := math_bits.Len64(x | 1)
	return (bitLen + 6) / 7
}
+func sozEvent(x uint64) (n int) {
+ return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Module", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Module = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UUID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CorrelationID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CorrelationID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Timestamp == nil {
+ m.Timestamp = &types.Timestamp{}
+ }
+ if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EventLevel", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EventLevel = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Category", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Category = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Metadata == nil {
+ m.Metadata = &Metadata{}
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActivityEvent", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &ActivityEvent{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Event_ActivityEvent{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecurityViolationEvent", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &SecurityViolationEvent{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Data = &Event_SecurityViolationEvent{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EventReport) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventReport: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventReport: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Events = append(m.Events, &Event{})
+ if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ActivityEvent) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ActivityEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ActivityEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dimensions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Dimensions = append(m.Dimensions, &common.Dimension{})
+ if err := m.Dimensions[len(m.Dimensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecurityViolationEvent) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecurityViolationEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecurityViolationEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PolicyName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SupportID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SupportID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Outcome", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Outcome = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OutcomeReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OutcomeReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BlockingExceptionReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BlockingExceptionReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Method = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field XForwardedForHeaderValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XForwardedForHeaderValue = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URI = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Request = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsTruncated", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IsTruncated = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestStatus", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequestStatus = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseCode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResponseCode = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServerAddr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServerAddr = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VSName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VSName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RemoteAddr = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemotePort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RemotePort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 18:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServerPort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServerPort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 19:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Violations = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 20:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubViolations", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubViolations = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 21:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ViolationRating", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ViolationRating = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 22:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigSetNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SigSetNames = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 23:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigCVEs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SigCVEs = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 24:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientClass", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientClass = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 25:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientApplication", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientApplication = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 26:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientApplicationVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientApplicationVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 27:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Severity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Severity = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 28:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ThreatCampaignNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ThreatCampaignNames = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 29:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BotAnomalies", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BotAnomalies = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 30:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BotCategory", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BotCategory = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 31:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EnforcedBotAnomalies", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EnforcedBotAnomalies = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 32:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BotSignatureName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BotSignatureName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 33:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ViolationContexts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ViolationContexts = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 34:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ViolationsData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ViolationsData = append(m.ViolationsData, &ViolationData{})
+ if err := m.ViolationsData[len(m.ViolationsData)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 35:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SystemID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 36:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InstanceTags", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.InstanceTags = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 37:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InstanceGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.InstanceGroup = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 38:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 39:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NginxID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 40:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParentHostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ParentHostname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// This is gogo/protobuf machine-generated code: a hand-rolled decoder that
// reads (tag, value) pairs until the input is exhausted. All five declared
// fields (ID, BlockingMask, Buffer, Offset, Length) are strings, i.e.
// length-delimited (wire type 2); unknown fields are preserved verbatim in
// m.XXX_unrecognized. Returns io.ErrUnexpectedEOF on truncated input,
// ErrIntOverflowEvent on a varint longer than 64 bits, and
// ErrInvalidLengthEvent on a negative/overflowing length prefix.
func (m *SignatureData) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Remember where this field started so the default branch can
		// re-read it when skipping an unknown field.
		preIndex := iNdEx
		// Decode the field's key as a varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowEvent
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is only valid inside a group; this
		// message is not a group, so it can never appear here.
		if wireType == 4 {
			return fmt.Errorf("proto: SignatureData: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SignatureData: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: ID (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
			}
			// Decode the varint length prefix of the string payload.
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			// postIndex < 0 guards against int overflow of iNdEx + length.
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ID = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: BlockingMask (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BlockingMask", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BlockingMask = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Buffer (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Buffer", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Buffer = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: Offset (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Offset = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// Field 5: Length (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Length = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key, skip the whole field, and
			// keep its raw bytes so re-marshaling round-trips them.
			iNdEx = preIndex
			skippy, err := skipEvent(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthEvent
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	// A final sanity check: the cursor must land exactly on the end of input.
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// This is gogo/protobuf machine-generated code: it loops over (tag, value)
// pairs, filling the two declared string fields (Name = field 1,
// Value = field 2, both wire type 2 / length-delimited) and stashing any
// unknown fields in m.XXX_unrecognized. Returns io.ErrUnexpectedEOF on
// truncated input, ErrIntOverflowEvent on an over-long varint, and
// ErrInvalidLengthEvent on an invalid length prefix.
func (m *ContextData) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Remember the field start so unknown fields can be skipped whole.
		preIndex := iNdEx
		// Decode the field key varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowEvent
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// End-group markers (wire type 4) are invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: ContextData: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ContextData: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Name (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			// Decode the varint length prefix of the string payload.
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			// postIndex < 0 guards against int overflow of iNdEx + length.
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Value (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowEvent
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthEvent
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthEvent
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key, skip it entirely, and keep
			// the raw bytes for round-tripping on re-marshal.
			iNdEx = preIndex
			skippy, err := skipEvent(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthEvent
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	// The cursor must finish exactly at the end of the buffer.
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
+func (m *ViolationData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ViolationData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ViolationData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContextData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ContextData == nil {
+ m.ContextData = &ContextData{}
+ }
+ if err := m.ContextData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthEvent
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signatures = append(m.Signatures, &SignatureData{})
+ if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEvent(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEvent
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipEvent(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEvent
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthEvent
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEvent
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEvent
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.proto
new file mode 100644
index 000000000..4dfc67f58
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.proto
@@ -0,0 +1,140 @@
+// Event messages
+syntax = "proto3";
+package f5.nginx.agent.sdk.events;
+
+import "common/common.proto";
+import "gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto/events;f5.nginx.agent.sdk.events";
+
+// Represents the metadata for an event
+message Metadata {
+ // Module is the process that generate the event
+ string Module = 1 [(gogoproto.jsontag) = "module"];
+ // UUID is a unique identifier for each event
+ string UUID = 2 [(gogoproto.jsontag) = "uuid"];
+ // CorrelationID is an ID used by the producer of the message to track the flow of events
+ string CorrelationID = 3 [(gogoproto.jsontag) = "correlation_id"];
+ // Timestamp defines the time of event generation
+ google.protobuf.Timestamp Timestamp = 4 [(gogoproto.jsontag) = "timestamp"];
+ // EventLevel defines the criticality of event
+ string EventLevel = 5 [(gogoproto.jsontag) = "event_level"];
+ // Type is used to identify the event type for further processing
+ string Type = 6 [(gogoproto.jsontag) = "type"];
+ // Category is used for classifying the event type into a higher level entity
+ string Category = 7 [(gogoproto.jsontag) = "category"];
+}
+
+// Represents an event
+message Event {
+ // Event metadata
+ Metadata Metadata = 1 [(gogoproto.jsontag) = "metadata"];
+ oneof data {
+ ActivityEvent ActivityEvent = 2 [(gogoproto.jsontag) = "activity_event"]; // Activity event
+
+ /**
+ * While generating the SecurityViolationEvent, the Metadata for a SecurityViolationEvent
+ * would look as shown below:
+ * - Module = Agent
+ * - UUID = A UUID generated by the Agent for the EventReport
+ * - CorrelationID = The UUID will be used as the Correlation ID to track the EventReport
+ * - Timestamp = The timestamp when NGINX Agent received the violation event
+ * - EventLevel = All the SecurityViolationEvent would be generated at an ERROR_EVENT_LEVEL ("ERROR") level
+ * In future, the levels might be dynamically chosen based on Request Outcome of SecurityViolationEvent
+ * - Type = NGINX_EVENT_TYPE ("Nginx")
+ * - Category = APP_PROTECT_CATEGORY ("AppProtect")
+ */
+
+ SecurityViolationEvent SecurityViolationEvent = 3 [(gogoproto.jsontag) = "security_violation_event"]; // Security violation event
+ }
+}
+
+// Represents an event report
+message EventReport {
+ // Array of events
+ repeated Event Events = 1 [(gogoproto.jsontag) = "events"];
+}
+
+// Represents an activity event
+message ActivityEvent {
+ // Activtiy event message
+ string Message = 1 [(gogoproto.jsontag) = "message"];
+ // Array of dimensions
+ repeated f5.nginx.agent.sdk.common.Dimension Dimensions = 2 [(gogoproto.jsontag) = "dimensions"];
+}
+
+// Represents a security violation that is emitted by the agent
+message SecurityViolationEvent {
+ string PolicyName = 1 [(gogoproto.jsontag) = "policy_name"];
+ string SupportID = 2 [(gogoproto.jsontag) = "support_id"];
+
+ string Outcome = 3 [(gogoproto.jsontag) = "outcome"];
+ string OutcomeReason = 4 [(gogoproto.jsontag) = "outcome_reason"];
+ string BlockingExceptionReason = 5 [(gogoproto.jsontag) = "blocking_exception_reason"];
+
+ string Method = 6 [(gogoproto.jsontag) = "method"];
+ string Protocol = 7 [(gogoproto.jsontag) = "protocol"];
+ string XForwardedForHeaderValue = 8 [(gogoproto.jsontag) = "xff_header_value"];
+
+ string URI = 9 [(gogoproto.jsontag) = "uri"];
+ string Request = 10 [(gogoproto.jsontag) = "request"];
+ string IsTruncated = 11 [(gogoproto.jsontag) = "is_truncated"];
+ string RequestStatus = 12 [(gogoproto.jsontag) = "request_status"];
+ string ResponseCode = 13 [(gogoproto.jsontag) = "response_code"];
+
+ string ServerAddr = 14 [(gogoproto.jsontag) = "server_addr"];
+ string VSName = 15 [(gogoproto.jsontag) = "vs_name"];
+ string RemoteAddr = 16 [(gogoproto.jsontag) = "remote_addr"];
+ string RemotePort = 17 [(gogoproto.jsontag) = "destination_port"];
+ string ServerPort = 18 [(gogoproto.jsontag) = "server_port"];
+
+ string Violations = 19 [(gogoproto.jsontag) = "violations"];
+ string SubViolations = 20 [(gogoproto.jsontag) = "sub_violations"];
+ string ViolationRating = 21 [(gogoproto.jsontag) = "violation_rating"];
+
+ string SigSetNames = 22 [(gogoproto.jsontag) = "sig_set_names"];
+ string SigCVEs = 23 [(gogoproto.jsontag) = "sig_cves"];
+
+ string ClientClass = 24 [(gogoproto.jsontag) = "client_class"];
+ string ClientApplication = 25 [(gogoproto.jsontag) = "client_application"];
+ string ClientApplicationVersion = 26 [(gogoproto.jsontag) = "client_application_version"];
+
+ string Severity = 27 [(gogoproto.jsontag) = "severity"];
+ string ThreatCampaignNames = 28 [(gogoproto.jsontag) = "threat_campaign_names"];
+
+ string BotAnomalies = 29 [(gogoproto.jsontag) = "bot_anomalies"];
+ string BotCategory = 30 [(gogoproto.jsontag) = "bot_category"];
+ string EnforcedBotAnomalies = 31 [(gogoproto.jsontag) = "enforced_bot_anomalies"];
+ string BotSignatureName = 32 [(gogoproto.jsontag) = "bot_signature_name"];
+
+ string ViolationContexts = 33 [(gogoproto.jsontag) = "violation_contexts"];
+ repeated ViolationData ViolationsData = 34 [(gogoproto.jsontag) = "violations_data"];
+
+ string SystemID = 35 [(gogoproto.jsontag) = "system_id"];
+ string InstanceTags = 36 [(gogoproto.jsontag) = "instance_tags"];
+ string InstanceGroup = 37 [(gogoproto.jsontag) = "instance_group"];
+ string DisplayName = 38 [(gogoproto.jsontag) = "display_name"];
+ string NginxID = 39 [(gogoproto.jsontag) = "nginx_id"];
+ string ParentHostname = 40 [(gogoproto.jsontag) = "parent_hostname"];
+}
+
+message SignatureData {
+ string ID = 1 [(gogoproto.jsontag) = "sig_data_id"];
+ string BlockingMask = 2 [(gogoproto.jsontag) = "sig_data_blocking_mask"];
+ string Buffer = 3 [(gogoproto.jsontag) = "sig_data_buffer"];
+ string Offset = 4 [(gogoproto.jsontag) = "sig_data_offset"];
+ string Length = 5 [(gogoproto.jsontag) = "sig_data_length"];
+}
+
+message ContextData {
+ string Name = 1 [(gogoproto.jsontag) = "parameter_data_name"];
+ string Value = 2 [(gogoproto.jsontag) = "parameter_data_value"];
+}
+
+message ViolationData {
+ string Name = 1 [(gogoproto.jsontag) = "violation_data_name"];
+ string Context = 2 [(gogoproto.jsontag) = "violation_data_context"];
+ ContextData ContextData = 3 [(gogoproto.jsontag) = "violation_data_context_data"];
+ repeated SignatureData Signatures = 4 [(gogoproto.jsontag) = "violation_data_signatures"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.pb.go
new file mode 100644
index 000000000..0e5d9b0e6
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.pb.go
@@ -0,0 +1,3191 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: host.proto
+
+package proto
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Represents the host system information
+type HostInfo struct {
+ // NGINX Agent version
+ Agent string `protobuf:"bytes,1,opt,name=agent,proto3" json:"agent"`
+ // Host boot time
+ Boot uint64 `protobuf:"varint,2,opt,name=boot,proto3" json:"boot"`
+ // Hostname
+ Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname"`
+ // Display Name
+ DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name"`
+ // OS type (e.g. freebsd, linux, etc)
+ OsType string `protobuf:"bytes,5,opt,name=os_type,json=osType,proto3" json:"os-type"`
+ // Host UUID
+ Uuid string `protobuf:"bytes,6,opt,name=uuid,proto3" json:"uuid"`
+ // The native cpu architecture queried at runtime, as returned by `uname -m` or empty string in case of error
+ Uname string `protobuf:"bytes,7,opt,name=uname,proto3" json:"uname"`
+ // List of disk partitions
+ Partitons []*DiskPartition `protobuf:"bytes,8,rep,name=partitons,proto3" json:"disk_partitions"`
+ // Network information
+ Network *Network `protobuf:"bytes,9,opt,name=network,proto3" json:"network"`
+ // List of CPU processor information
+ Processor []*CpuInfo `protobuf:"bytes,10,rep,name=processor,proto3" json:"processor"`
+ // Release Information
+ Release *ReleaseInfo `protobuf:"bytes,11,opt,name=release,proto3" json:"release"`
+ // List of tags
+ Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags"`
+ // List of directories that the NGINX Agent is allowed to access on the host
+ AgentAccessibleDirs string `protobuf:"bytes,13,opt,name=agent_accessible_dirs,json=agentAccessibleDirs,proto3" json:"agent_accessible_dirs"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HostInfo) Reset() { *m = HostInfo{} }
+func (m *HostInfo) String() string { return proto.CompactTextString(m) }
+func (*HostInfo) ProtoMessage() {}
+func (*HostInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{0}
+}
+func (m *HostInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HostInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HostInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HostInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HostInfo.Merge(m, src)
+}
+func (m *HostInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *HostInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_HostInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostInfo proto.InternalMessageInfo
+
+func (m *HostInfo) GetAgent() string {
+ if m != nil {
+ return m.Agent
+ }
+ return ""
+}
+
+func (m *HostInfo) GetBoot() uint64 {
+ if m != nil {
+ return m.Boot
+ }
+ return 0
+}
+
+func (m *HostInfo) GetHostname() string {
+ if m != nil {
+ return m.Hostname
+ }
+ return ""
+}
+
+func (m *HostInfo) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *HostInfo) GetOsType() string {
+ if m != nil {
+ return m.OsType
+ }
+ return ""
+}
+
+func (m *HostInfo) GetUuid() string {
+ if m != nil {
+ return m.Uuid
+ }
+ return ""
+}
+
+func (m *HostInfo) GetUname() string {
+ if m != nil {
+ return m.Uname
+ }
+ return ""
+}
+
+func (m *HostInfo) GetPartitons() []*DiskPartition {
+ if m != nil {
+ return m.Partitons
+ }
+ return nil
+}
+
+func (m *HostInfo) GetNetwork() *Network {
+ if m != nil {
+ return m.Network
+ }
+ return nil
+}
+
+func (m *HostInfo) GetProcessor() []*CpuInfo {
+ if m != nil {
+ return m.Processor
+ }
+ return nil
+}
+
+func (m *HostInfo) GetRelease() *ReleaseInfo {
+ if m != nil {
+ return m.Release
+ }
+ return nil
+}
+
+func (m *HostInfo) GetTags() []string {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
+func (m *HostInfo) GetAgentAccessibleDirs() string {
+ if m != nil {
+ return m.AgentAccessibleDirs
+ }
+ return ""
+}
+
+// Represents a disk partition
+type DiskPartition struct {
+ // Mount point location
+ MountPoint string `protobuf:"bytes,1,opt,name=mount_point,json=mountPoint,proto3" json:"mountpoint"`
+ // Device file path
+ Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device"`
+ // File system type (e.g. hfs, swap, etc)
+ FsType string `protobuf:"bytes,3,opt,name=fs_type,json=fsType,proto3" json:"fstype"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DiskPartition) Reset() { *m = DiskPartition{} }
+func (m *DiskPartition) String() string { return proto.CompactTextString(m) }
+func (*DiskPartition) ProtoMessage() {}
+func (*DiskPartition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{1}
+}
+func (m *DiskPartition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DiskPartition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DiskPartition.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DiskPartition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DiskPartition.Merge(m, src)
+}
+func (m *DiskPartition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DiskPartition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DiskPartition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DiskPartition proto.InternalMessageInfo
+
+func (m *DiskPartition) GetMountPoint() string {
+ if m != nil {
+ return m.MountPoint
+ }
+ return ""
+}
+
+func (m *DiskPartition) GetDevice() string {
+ if m != nil {
+ return m.Device
+ }
+ return ""
+}
+
+func (m *DiskPartition) GetFsType() string {
+ if m != nil {
+ return m.FsType
+ }
+ return ""
+}
+
+// Represents a network
+type Network struct {
+ // List of network interfaces
+ Interfaces []*NetworkInterface `protobuf:"bytes,1,rep,name=interfaces,proto3" json:"interfaces"`
+ // Default network name
+ Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Network) Reset() { *m = Network{} }
+func (m *Network) String() string { return proto.CompactTextString(m) }
+func (*Network) ProtoMessage() {}
+func (*Network) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{2}
+}
+func (m *Network) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Network) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Network.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Network) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Network.Merge(m, src)
+}
+func (m *Network) XXX_Size() int {
+ return m.Size()
+}
+func (m *Network) XXX_DiscardUnknown() {
+ xxx_messageInfo_Network.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Network proto.InternalMessageInfo
+
+func (m *Network) GetInterfaces() []*NetworkInterface {
+ if m != nil {
+ return m.Interfaces
+ }
+ return nil
+}
+
+func (m *Network) GetDefault() string {
+ if m != nil {
+ return m.Default
+ }
+ return ""
+}
+
+// Represents a network interface
+type NetworkInterface struct {
+ // MAC address
+ Mac string `protobuf:"bytes,1,opt,name=mac,proto3" json:"mac"`
+ // List of IPV6 addresses
+ Ipv6 []*Address `protobuf:"bytes,2,rep,name=ipv6,proto3" json:"ipv6"`
+ // List of IPV4 addresses
+ Ipv4 []*Address `protobuf:"bytes,3,rep,name=ipv4,proto3" json:"ipv4"`
+ // Name of network interface
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkInterface) Reset() { *m = NetworkInterface{} }
+func (m *NetworkInterface) String() string { return proto.CompactTextString(m) }
+func (*NetworkInterface) ProtoMessage() {}
+func (*NetworkInterface) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{3}
+}
+func (m *NetworkInterface) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkInterface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NetworkInterface.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NetworkInterface) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkInterface.Merge(m, src)
+}
+func (m *NetworkInterface) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkInterface) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkInterface.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkInterface proto.InternalMessageInfo
+
+func (m *NetworkInterface) GetMac() string {
+ if m != nil {
+ return m.Mac
+ }
+ return ""
+}
+
+func (m *NetworkInterface) GetIpv6() []*Address {
+ if m != nil {
+ return m.Ipv6
+ }
+ return nil
+}
+
+func (m *NetworkInterface) GetIpv4() []*Address {
+ if m != nil {
+ return m.Ipv4
+ }
+ return nil
+}
+
+func (m *NetworkInterface) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// Represents an IP address
+type Address struct {
+ // Prefix length
+ Prefixlen int64 `protobuf:"varint,1,opt,name=prefixlen,proto3" json:"prefixlen"`
+ // Netmask
+ Netmask string `protobuf:"bytes,2,opt,name=netmask,proto3" json:"netmask"`
+ // IP Address
+ Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Address) Reset() { *m = Address{} }
+func (m *Address) String() string { return proto.CompactTextString(m) }
+func (*Address) ProtoMessage() {}
+func (*Address) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{4}
+}
+func (m *Address) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Address.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Address) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Address.Merge(m, src)
+}
+func (m *Address) XXX_Size() int {
+ return m.Size()
+}
+func (m *Address) XXX_DiscardUnknown() {
+ xxx_messageInfo_Address.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Address proto.InternalMessageInfo
+
+func (m *Address) GetPrefixlen() int64 {
+ if m != nil {
+ return m.Prefixlen
+ }
+ return 0
+}
+
+func (m *Address) GetNetmask() string {
+ if m != nil {
+ return m.Netmask
+ }
+ return ""
+}
+
+func (m *Address) GetAddress() string {
+ if m != nil {
+ return m.Address
+ }
+ return ""
+}
+
+// Represents CPU information
+type CpuInfo struct {
+ // Model of CPU
+ Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model"`
+ // Number of cores
+ Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores"`
+ // CPU architecture
+ Architecture string `protobuf:"bytes,3,opt,name=architecture,proto3" json:"architecture"`
+ // CPU clock speed in MHz
+ Mhz float64 `protobuf:"fixed64,4,opt,name=mhz,proto3" json:"mhz"`
+ // Hypervisor (e.g. VMWare, KVM, etc.)
+ Hypervisor string `protobuf:"bytes,5,opt,name=hypervisor,proto3" json:"hypervisor"`
+ // Total number of CPUs
+ Cpus int32 `protobuf:"varint,6,opt,name=cpus,proto3" json:"cpus"`
+ // Type of hypervisor (e.g guest or host)
+ Virtualization string `protobuf:"bytes,7,opt,name=virtualization,proto3" json:"virtualization"`
+ // Map of caches with names as the keys and size in bytes as the values
+ Cache map[string]string `protobuf:"bytes,8,rep,name=cache,proto3" json:"cache" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CpuInfo) Reset() { *m = CpuInfo{} }
+func (m *CpuInfo) String() string { return proto.CompactTextString(m) }
+func (*CpuInfo) ProtoMessage() {}
+func (*CpuInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{5}
+}
+func (m *CpuInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CpuInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CpuInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CpuInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CpuInfo.Merge(m, src)
+}
+func (m *CpuInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *CpuInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_CpuInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CpuInfo proto.InternalMessageInfo
+
+func (m *CpuInfo) GetModel() string {
+ if m != nil {
+ return m.Model
+ }
+ return ""
+}
+
+func (m *CpuInfo) GetCores() int32 {
+ if m != nil {
+ return m.Cores
+ }
+ return 0
+}
+
+func (m *CpuInfo) GetArchitecture() string {
+ if m != nil {
+ return m.Architecture
+ }
+ return ""
+}
+
+func (m *CpuInfo) GetMhz() float64 {
+ if m != nil {
+ return m.Mhz
+ }
+ return 0
+}
+
+func (m *CpuInfo) GetHypervisor() string {
+ if m != nil {
+ return m.Hypervisor
+ }
+ return ""
+}
+
+func (m *CpuInfo) GetCpus() int32 {
+ if m != nil {
+ return m.Cpus
+ }
+ return 0
+}
+
+func (m *CpuInfo) GetVirtualization() string {
+ if m != nil {
+ return m.Virtualization
+ }
+ return ""
+}
+
+func (m *CpuInfo) GetCache() map[string]string {
+ if m != nil {
+ return m.Cache
+ }
+ return nil
+}
+
+// Represents release information
+type ReleaseInfo struct {
+ // OS type (e.g. freebsd, linux, etc)
+ Codename string `protobuf:"bytes,1,opt,name=codename,proto3" json:"codename"`
+ // OS name (e.g. ubuntu, linuxmint, etc)
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id"`
+ // OS family (e.g. debian, rhel)
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name"`
+ // Version of the OS kernel
+ VersionId string `protobuf:"bytes,4,opt,name=version_id,json=versionId,proto3" json:"version_id"`
+ // Version of the OS
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ReleaseInfo) Reset() { *m = ReleaseInfo{} }
+func (m *ReleaseInfo) String() string { return proto.CompactTextString(m) }
+func (*ReleaseInfo) ProtoMessage() {}
+func (*ReleaseInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85e40b83b4d50a8d, []int{6}
+}
+func (m *ReleaseInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReleaseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ReleaseInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ReleaseInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReleaseInfo.Merge(m, src)
+}
+func (m *ReleaseInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReleaseInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReleaseInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReleaseInfo proto.InternalMessageInfo
+
+func (m *ReleaseInfo) GetCodename() string {
+ if m != nil {
+ return m.Codename
+ }
+ return ""
+}
+
+func (m *ReleaseInfo) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *ReleaseInfo) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ReleaseInfo) GetVersionId() string {
+ if m != nil {
+ return m.VersionId
+ }
+ return ""
+}
+
+func (m *ReleaseInfo) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*HostInfo)(nil), "f5.nginx.agent.sdk.HostInfo")
+ proto.RegisterType((*DiskPartition)(nil), "f5.nginx.agent.sdk.DiskPartition")
+ proto.RegisterType((*Network)(nil), "f5.nginx.agent.sdk.Network")
+ proto.RegisterType((*NetworkInterface)(nil), "f5.nginx.agent.sdk.NetworkInterface")
+ proto.RegisterType((*Address)(nil), "f5.nginx.agent.sdk.Address")
+ proto.RegisterType((*CpuInfo)(nil), "f5.nginx.agent.sdk.CpuInfo")
+ proto.RegisterMapType((map[string]string)(nil), "f5.nginx.agent.sdk.CpuInfo.CacheEntry")
+ proto.RegisterType((*ReleaseInfo)(nil), "f5.nginx.agent.sdk.ReleaseInfo")
+}
+
+func init() { proto.RegisterFile("host.proto", fileDescriptor_85e40b83b4d50a8d) }
+
+var fileDescriptor_85e40b83b4d50a8d = []byte{
+ // 950 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x8f, 0xdb, 0x44,
+ 0x14, 0x96, 0xe3, 0xcd, 0x7a, 0xf3, 0xb2, 0x5b, 0x56, 0x53, 0x40, 0x6e, 0x41, 0xeb, 0x10, 0x0a,
+ 0x0a, 0x42, 0x75, 0xa4, 0xed, 0x02, 0xa5, 0x9c, 0x9a, 0x16, 0xe8, 0x1e, 0xa8, 0xaa, 0xd1, 0x9e,
+ 0xb8, 0x44, 0x5e, 0x7b, 0x92, 0x8c, 0x92, 0x78, 0x2c, 0xcf, 0x38, 0x34, 0x7b, 0x46, 0x48, 0xfc,
+ 0x11, 0x24, 0x7e, 0x06, 0xe2, 0xc2, 0x91, 0x5f, 0x60, 0xa1, 0x3d, 0xfa, 0x57, 0xa0, 0x79, 0x33,
+ 0x8e, 0xb3, 0x4b, 0xa8, 0x7a, 0x79, 0xf3, 0xde, 0xe7, 0x6f, 0x9e, 0xc7, 0xef, 0x7d, 0xf3, 0x0c,
+ 0x30, 0x13, 0x52, 0x85, 0x59, 0x2e, 0x94, 0x20, 0x64, 0xf2, 0x45, 0x98, 0x4e, 0x79, 0xfa, 0x3a,
+ 0x8c, 0xa6, 0x2c, 0x55, 0xa1, 0x4c, 0xe6, 0xf7, 0x61, 0x2a, 0xa6, 0xc2, 0x3c, 0xef, 0xff, 0xd6,
+ 0x86, 0x83, 0x17, 0x42, 0xaa, 0xf3, 0x74, 0x22, 0x48, 0x00, 0x6d, 0x64, 0xf9, 0x4e, 0xcf, 0x19,
+ 0x74, 0x46, 0x9d, 0xaa, 0x0c, 0x0c, 0x40, 0xcd, 0x42, 0x3e, 0x84, 0xbd, 0x4b, 0x21, 0x94, 0xdf,
+ 0xea, 0x39, 0x83, 0xbd, 0xd1, 0x41, 0x55, 0x06, 0x18, 0x53, 0xb4, 0x64, 0x00, 0x07, 0xfa, 0xcd,
+ 0x69, 0xb4, 0x64, 0xbe, 0x8b, 0x19, 0x0e, 0xab, 0x32, 0xd8, 0x60, 0x74, 0xe3, 0x91, 0x47, 0x70,
+ 0x98, 0x70, 0x99, 0x2d, 0xa2, 0xf5, 0x18, 0xd9, 0x7b, 0xc8, 0x3e, 0xae, 0xca, 0xe0, 0x06, 0x4e,
+ 0xbb, 0x36, 0x7a, 0xa9, 0x37, 0x3d, 0x00, 0x4f, 0xc8, 0xb1, 0x5a, 0x67, 0xcc, 0x6f, 0x23, 0xbf,
+ 0x5b, 0x95, 0x81, 0x27, 0xe4, 0x43, 0x0d, 0xd1, 0x7d, 0x21, 0x2f, 0xd6, 0x19, 0xd3, 0x47, 0x2c,
+ 0x0a, 0x9e, 0xf8, 0xfb, 0x48, 0xc1, 0x23, 0xea, 0x98, 0xa2, 0xd5, 0x5f, 0x58, 0xe0, 0x1b, 0xbd,
+ 0xe6, 0x0b, 0x11, 0xa0, 0x66, 0x21, 0x17, 0xd0, 0xc9, 0xa2, 0x5c, 0x71, 0x25, 0x52, 0xe9, 0x1f,
+ 0xf4, 0xdc, 0x41, 0xf7, 0xf4, 0xa3, 0xf0, 0xbf, 0x35, 0x0c, 0x9f, 0x73, 0x39, 0x7f, 0x85, 0x44,
+ 0x2e, 0xd2, 0xd1, 0xdd, 0xaa, 0x0c, 0xde, 0x49, 0xb8, 0x9c, 0x8f, 0xb3, 0x1a, 0x93, 0xb4, 0x49,
+ 0x44, 0x46, 0xe0, 0xa5, 0x4c, 0xfd, 0x24, 0xf2, 0xb9, 0xdf, 0xe9, 0x39, 0x83, 0xee, 0xe9, 0x07,
+ 0xbb, 0x72, 0xbe, 0x34, 0x14, 0xf3, 0x5d, 0x96, 0x4f, 0x6b, 0x87, 0xbc, 0x80, 0x4e, 0x96, 0x8b,
+ 0x98, 0x49, 0x29, 0x72, 0x1f, 0xf0, 0x64, 0x3b, 0xb3, 0x3c, 0xcb, 0x0a, 0xdd, 0xcc, 0xd1, 0x51,
+ 0x55, 0x06, 0xcd, 0x0e, 0xda, 0xb8, 0xe4, 0x3b, 0xf0, 0x72, 0xb6, 0x60, 0x91, 0x64, 0x7e, 0x17,
+ 0x4f, 0x13, 0xec, 0xca, 0x43, 0x0d, 0x05, 0x73, 0xe1, 0x89, 0xec, 0x1e, 0x5a, 0x3b, 0xba, 0xd4,
+ 0x2a, 0x9a, 0x4a, 0xff, 0xb0, 0xe7, 0xd6, 0xa5, 0xd6, 0x31, 0x45, 0x4b, 0x7e, 0x80, 0xf7, 0x30,
+ 0xd9, 0x38, 0x8a, 0xf5, 0x7b, 0xf9, 0xe5, 0x82, 0x8d, 0x13, 0x9e, 0x4b, 0xff, 0x08, 0x4b, 0x7f,
+ 0xaf, 0x2a, 0x83, 0xdd, 0x04, 0x7a, 0x17, 0xe1, 0xa7, 0x1b, 0xf4, 0x39, 0xcf, 0x65, 0xff, 0x57,
+ 0x07, 0x8e, 0x6e, 0x14, 0x9d, 0x0c, 0xa1, 0xbb, 0x14, 0x45, 0xaa, 0xc6, 0x99, 0xe0, 0x1b, 0xcd,
+ 0xde, 0xa9, 0xca, 0x00, 0x10, 0x46, 0x94, 0x1a, 0xff, 0x95, 0xf6, 0x49, 0x1f, 0xf6, 0x13, 0xb6,
+ 0xe2, 0x31, 0x43, 0xfd, 0x76, 0x46, 0x50, 0x95, 0x81, 0x45, 0xa8, 0x5d, 0xc9, 0xc7, 0xe0, 0x4d,
+ 0xac, 0xc8, 0xdc, 0x86, 0x34, 0x91, 0x46, 0x63, 0x13, 0xd4, 0x58, 0xff, 0x17, 0x07, 0x3c, 0xdb,
+ 0x2c, 0x72, 0x01, 0xc0, 0x53, 0xc5, 0xf2, 0x49, 0x14, 0x33, 0xe9, 0x3b, 0xd8, 0x97, 0x07, 0x6f,
+ 0xe8, 0xee, 0x79, 0x4d, 0x36, 0x47, 0x6d, 0xf6, 0xd2, 0x2d, 0x9f, 0x7c, 0x02, 0x5e, 0xc2, 0x26,
+ 0x51, 0xb1, 0x50, 0xf6, 0xac, 0xd8, 0x01, 0x0b, 0xd1, 0xda, 0xe9, 0xff, 0xe1, 0xc0, 0xf1, 0xed,
+ 0xbc, 0xe4, 0x1e, 0xb8, 0xcb, 0x28, 0xb6, 0xf5, 0xf0, 0xaa, 0x32, 0xd0, 0x21, 0xd5, 0x86, 0x7c,
+ 0x0d, 0x7b, 0x3c, 0x5b, 0x7d, 0xe9, 0xb7, 0xfe, 0x5f, 0x3e, 0x4f, 0x93, 0x24, 0x67, 0x52, 0x9a,
+ 0x76, 0x6a, 0x32, 0x45, 0x6b, 0xb7, 0x9e, 0xf9, 0xee, 0xdb, 0x6f, 0x3d, 0xc3, 0xad, 0x67, 0x5a,
+ 0x27, 0x5b, 0xb7, 0x1c, 0x9f, 0xe2, 0x95, 0x43, 0xdb, 0xff, 0xd9, 0x01, 0xcf, 0xee, 0x24, 0x9f,
+ 0x6b, 0x8d, 0xb3, 0x09, 0x7f, 0xbd, 0x60, 0x29, 0x7e, 0x80, 0x5b, 0xcb, 0xd8, 0x82, 0xb4, 0x71,
+ 0x75, 0x8d, 0x52, 0xa6, 0x96, 0x91, 0x9c, 0x6f, 0xd7, 0xc8, 0x42, 0xb4, 0x76, 0x34, 0x2d, 0x32,
+ 0xe9, 0x6d, 0x47, 0x91, 0x66, 0x21, 0x5a, 0x3b, 0xfd, 0xdf, 0x5d, 0xf0, 0xec, 0xd5, 0xd1, 0x53,
+ 0x62, 0x29, 0x12, 0xb6, 0xd8, 0x9e, 0x83, 0x08, 0x50, 0xb3, 0x68, 0x42, 0x2c, 0x72, 0x26, 0xf1,
+ 0xc5, 0x6d, 0x43, 0x40, 0x80, 0x9a, 0x85, 0x9c, 0xc1, 0x61, 0x94, 0xc7, 0x33, 0xae, 0x58, 0xac,
+ 0x8a, 0xbc, 0xd6, 0x12, 0x0e, 0xb8, 0x6d, 0x9c, 0xde, 0x88, 0xb0, 0x73, 0xb3, 0x2b, 0xac, 0x93,
+ 0x63, 0x3b, 0x37, 0xbb, 0xa2, 0xda, 0x90, 0x10, 0x60, 0xb6, 0xce, 0x58, 0xbe, 0xe2, 0xfa, 0xfa,
+ 0xb7, 0x1b, 0xad, 0x37, 0x28, 0xdd, 0xf2, 0x75, 0xcd, 0xe3, 0xac, 0x90, 0x38, 0x06, 0xdb, 0xa6,
+ 0xe6, 0x3a, 0xa6, 0x68, 0xc9, 0x13, 0xb8, 0xb3, 0xe2, 0xb9, 0x2a, 0xa2, 0x05, 0xbf, 0x8a, 0xf4,
+ 0x65, 0xb2, 0xf3, 0x90, 0x54, 0x65, 0x70, 0xeb, 0x09, 0xbd, 0x15, 0x93, 0xef, 0xa1, 0x1d, 0x47,
+ 0xf1, 0x8c, 0xd9, 0xe9, 0xf8, 0xe9, 0x1b, 0x66, 0x50, 0xf8, 0x4c, 0x13, 0xbf, 0x4d, 0x55, 0xbe,
+ 0xb6, 0x35, 0xd2, 0x31, 0x35, 0xcb, 0xfd, 0xc7, 0x00, 0xcd, 0x73, 0x72, 0x0c, 0xee, 0x9c, 0xad,
+ 0x4d, 0xc5, 0xa9, 0x76, 0xc9, 0xbb, 0xd0, 0x5e, 0x45, 0x8b, 0xc2, 0xde, 0x56, 0x6a, 0x82, 0x27,
+ 0xad, 0xc7, 0x4e, 0xff, 0x4f, 0x07, 0xba, 0x5b, 0xe3, 0x49, 0xff, 0x78, 0x62, 0x91, 0x30, 0x14,
+ 0x99, 0xd3, 0xfc, 0x78, 0x6a, 0x8c, 0x6e, 0x3c, 0xf2, 0x3e, 0xb4, 0x78, 0x62, 0xe5, 0xb2, 0x5f,
+ 0x95, 0x41, 0x8b, 0x27, 0xb4, 0xc5, 0x93, 0x8d, 0x44, 0xdd, 0x5d, 0x12, 0x25, 0x0f, 0x01, 0x56,
+ 0x2c, 0x97, 0x5c, 0xa4, 0x63, 0x9e, 0x58, 0x19, 0x63, 0xf1, 0x1b, 0x94, 0x76, 0xac, 0x7f, 0x9e,
+ 0x68, 0xc5, 0xd9, 0x60, 0xfb, 0x47, 0x65, 0x21, 0x5a, 0x3b, 0xa3, 0xaf, 0xfe, 0xba, 0x3e, 0x71,
+ 0xfe, 0xbe, 0x3e, 0x71, 0xfe, 0xb9, 0x3e, 0x71, 0x7e, 0xfc, 0x6c, 0xca, 0xd5, 0xac, 0xb8, 0x0c,
+ 0x63, 0xb1, 0x1c, 0x62, 0x39, 0x87, 0x58, 0xce, 0xa1, 0x4c, 0xe6, 0xc3, 0xd5, 0xe9, 0x10, 0xff,
+ 0xd6, 0xdf, 0xa0, 0xbd, 0xdc, 0xc7, 0xe5, 0xd1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x49,
+ 0xf4, 0x5f, 0xe8, 0x07, 0x00, 0x00,
+}
+
+func (m *HostInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.AgentAccessibleDirs) > 0 {
+ i -= len(m.AgentAccessibleDirs)
+ copy(dAtA[i:], m.AgentAccessibleDirs)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.AgentAccessibleDirs)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.Tags) > 0 {
+ for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Tags[iNdEx])
+ copy(dAtA[i:], m.Tags[iNdEx])
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Tags[iNdEx])))
+ i--
+ dAtA[i] = 0x62
+ }
+ }
+ if m.Release != nil {
+ {
+ size, err := m.Release.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.Processor) > 0 {
+ for iNdEx := len(m.Processor) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Processor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if m.Network != nil {
+ {
+ size, err := m.Network.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.Partitons) > 0 {
+ for iNdEx := len(m.Partitons) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Partitons[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.Uname) > 0 {
+ i -= len(m.Uname)
+ copy(dAtA[i:], m.Uname)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Uname)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Uuid) > 0 {
+ i -= len(m.Uuid)
+ copy(dAtA[i:], m.Uuid)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Uuid)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.OsType) > 0 {
+ i -= len(m.OsType)
+ copy(dAtA[i:], m.OsType)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.OsType)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.DisplayName) > 0 {
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Hostname) > 0 {
+ i -= len(m.Hostname)
+ copy(dAtA[i:], m.Hostname)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Hostname)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Boot != 0 {
+ i = encodeVarintHost(dAtA, i, uint64(m.Boot))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Agent) > 0 {
+ i -= len(m.Agent)
+ copy(dAtA[i:], m.Agent)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Agent)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DiskPartition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DiskPartition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DiskPartition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.FsType) > 0 {
+ i -= len(m.FsType)
+ copy(dAtA[i:], m.FsType)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.FsType)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Device) > 0 {
+ i -= len(m.Device)
+ copy(dAtA[i:], m.Device)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Device)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.MountPoint) > 0 {
+ i -= len(m.MountPoint)
+ copy(dAtA[i:], m.MountPoint)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.MountPoint)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Network) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Network) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Network) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Default) > 0 {
+ i -= len(m.Default)
+ copy(dAtA[i:], m.Default)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Default)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Interfaces) > 0 {
+ for iNdEx := len(m.Interfaces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Interfaces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkInterface) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkInterface) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkInterface) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Ipv4) > 0 {
+ for iNdEx := len(m.Ipv4) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ipv4[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Ipv6) > 0 {
+ for iNdEx := len(m.Ipv6) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ipv6[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintHost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Mac) > 0 {
+ i -= len(m.Mac)
+ copy(dAtA[i:], m.Mac)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Mac)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Address) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Address) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Address) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Address) > 0 {
+ i -= len(m.Address)
+ copy(dAtA[i:], m.Address)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Address)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Netmask) > 0 {
+ i -= len(m.Netmask)
+ copy(dAtA[i:], m.Netmask)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Netmask)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Prefixlen != 0 {
+ i = encodeVarintHost(dAtA, i, uint64(m.Prefixlen))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CpuInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CpuInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CpuInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Cache) > 0 {
+ for k := range m.Cache {
+ v := m.Cache[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintHost(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintHost(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintHost(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.Virtualization) > 0 {
+ i -= len(m.Virtualization)
+ copy(dAtA[i:], m.Virtualization)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Virtualization)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Cpus != 0 {
+ i = encodeVarintHost(dAtA, i, uint64(m.Cpus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.Hypervisor) > 0 {
+ i -= len(m.Hypervisor)
+ copy(dAtA[i:], m.Hypervisor)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Hypervisor)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Mhz != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Mhz))))
+ i--
+ dAtA[i] = 0x21
+ }
+ if len(m.Architecture) > 0 {
+ i -= len(m.Architecture)
+ copy(dAtA[i:], m.Architecture)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Architecture)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Cores != 0 {
+ i = encodeVarintHost(dAtA, i, uint64(m.Cores))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Model) > 0 {
+ i -= len(m.Model)
+ copy(dAtA[i:], m.Model)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Model)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ReleaseInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReleaseInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReleaseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.VersionId) > 0 {
+ i -= len(m.VersionId)
+ copy(dAtA[i:], m.VersionId)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.VersionId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Id) > 0 {
+ i -= len(m.Id)
+ copy(dAtA[i:], m.Id)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Id)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Codename) > 0 {
+ i -= len(m.Codename)
+ copy(dAtA[i:], m.Codename)
+ i = encodeVarintHost(dAtA, i, uint64(len(m.Codename)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintHost(dAtA []byte, offset int, v uint64) int {
+ offset -= sovHost(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *HostInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Agent)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.Boot != 0 {
+ n += 1 + sovHost(uint64(m.Boot))
+ }
+ l = len(m.Hostname)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.DisplayName)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.OsType)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Uuid)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Uname)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if len(m.Partitons) > 0 {
+ for _, e := range m.Partitons {
+ l = e.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ if m.Network != nil {
+ l = m.Network.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if len(m.Processor) > 0 {
+ for _, e := range m.Processor {
+ l = e.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ if m.Release != nil {
+ l = m.Release.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if len(m.Tags) > 0 {
+ for _, s := range m.Tags {
+ l = len(s)
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ l = len(m.AgentAccessibleDirs)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DiskPartition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.MountPoint)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Device)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.FsType)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Network) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Interfaces) > 0 {
+ for _, e := range m.Interfaces {
+ l = e.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ l = len(m.Default)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NetworkInterface) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Mac)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if len(m.Ipv6) > 0 {
+ for _, e := range m.Ipv6 {
+ l = e.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ if len(m.Ipv4) > 0 {
+ for _, e := range m.Ipv4 {
+ l = e.Size()
+ n += 1 + l + sovHost(uint64(l))
+ }
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Address) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Prefixlen != 0 {
+ n += 1 + sovHost(uint64(m.Prefixlen))
+ }
+ l = len(m.Netmask)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Address)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CpuInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Model)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.Cores != 0 {
+ n += 1 + sovHost(uint64(m.Cores))
+ }
+ l = len(m.Architecture)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.Mhz != 0 {
+ n += 9
+ }
+ l = len(m.Hypervisor)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.Cpus != 0 {
+ n += 1 + sovHost(uint64(m.Cpus))
+ }
+ l = len(m.Virtualization)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if len(m.Cache) > 0 {
+ for k, v := range m.Cache {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovHost(uint64(len(k))) + 1 + len(v) + sovHost(uint64(len(v)))
+ n += mapEntrySize + 1 + sovHost(uint64(mapEntrySize))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ReleaseInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Codename)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Id)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.VersionId)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovHost(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovHost(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozHost(x uint64) (n int) {
+ return sovHost(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *HostInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Agent = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Boot", wireType)
+ }
+ m.Boot = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Boot |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OsType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OsType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Uuid = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Uname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Uname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partitons", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Partitons = append(m.Partitons, &DiskPartition{})
+ if err := m.Partitons[len(m.Partitons)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Network == nil {
+ m.Network = &Network{}
+ }
+ if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Processor", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Processor = append(m.Processor, &CpuInfo{})
+ if err := m.Processor[len(m.Processor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Release", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Release == nil {
+ m.Release = &ReleaseInfo{}
+ }
+ if err := m.Release.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AgentAccessibleDirs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AgentAccessibleDirs = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DiskPartition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DiskPartition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DiskPartition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MountPoint", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MountPoint = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Device = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FsType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FsType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Network) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Network: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Interfaces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Interfaces = append(m.Interfaces, &NetworkInterface{})
+ if err := m.Interfaces[len(m.Interfaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Default = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkInterface) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkInterface: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkInterface: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mac", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Mac = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ipv6", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ipv6 = append(m.Ipv6, &Address{})
+ if err := m.Ipv6[len(m.Ipv6)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ipv4", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ipv4 = append(m.Ipv4, &Address{})
+ if err := m.Ipv4[len(m.Ipv4)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Address) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Address: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Address: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Prefixlen", wireType)
+ }
+ m.Prefixlen = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Prefixlen |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Netmask", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Netmask = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Address = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CpuInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CpuInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CpuInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Model", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Model = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cores", wireType)
+ }
+ m.Cores = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Cores |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architecture = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mhz", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Mhz = float64(math.Float64frombits(v))
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hypervisor", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hypervisor = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cpus", wireType)
+ }
+ m.Cpus = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Cpus |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Virtualization", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Virtualization = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Cache == nil {
+ m.Cache = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthHost
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthHost
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Cache[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReleaseInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReleaseInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReleaseInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Codename", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Codename = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Id = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VersionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthHost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthHost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipHost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthHost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipHost(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowHost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthHost
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupHost
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthHost
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthHost = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowHost = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupHost = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.proto
new file mode 100644
index 000000000..892d182b2
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/host.proto
@@ -0,0 +1,110 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "gogo.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents the host system information
+message HostInfo {
+ // NGINX Agent version
+ string agent = 1 [(gogoproto.jsontag) = "agent"];
+ // Host boot time
+ uint64 boot = 2 [(gogoproto.jsontag) = "boot"];
+ // Hostname
+ string hostname = 3 [(gogoproto.jsontag) = "hostname"];
+ // Display Name
+ string display_name = 4 [(gogoproto.jsontag) = "display_name"];
+ // OS type (e.g. freebsd, linux, etc)
+ string os_type = 5 [(gogoproto.jsontag) = "os-type"];
+ // Host UUID
+ string uuid = 6 [(gogoproto.jsontag) = "uuid"];
+ // The native cpu architecture queried at runtime, as returned by `uname -m` or empty string in case of error
+ string uname = 7 [(gogoproto.jsontag) = "uname"];
+ // List of disk partitions
+ repeated DiskPartition partitons = 8 [(gogoproto.jsontag) = "disk_partitions"];
+ // Network information
+ Network network = 9 [(gogoproto.jsontag) = "network"];
+ // List of CPU processor information
+ repeated CpuInfo processor = 10 [(gogoproto.jsontag) = "processor"];
+ // Release Information
+ ReleaseInfo release = 11 [(gogoproto.jsontag) = "release"];
+ // List of tags
+ repeated string tags = 12 [(gogoproto.jsontag) = "tags"];
+ // List of directories that the NGINX Agent is allowed to access on the host
+ string agent_accessible_dirs = 13 [(gogoproto.jsontag) = "agent_accessible_dirs"];
+}
+
+// Represents a disk partition
+message DiskPartition {
+ // Mount point location
+ string mount_point = 1 [(gogoproto.jsontag) = "mountpoint"];
+ // Device file path
+ string device = 2 [(gogoproto.jsontag) = "device"];
+ // File system type (e.g. hfs, swap, etc)
+ string fs_type = 3 [(gogoproto.jsontag) = "fstype"];
+}
+
+// Represents a network
+message Network {
+ // List of network interfaces
+ repeated NetworkInterface interfaces = 1 [(gogoproto.jsontag) = "interfaces"];
+ // Default network name
+ string default = 2 [(gogoproto.jsontag) = "default"];
+}
+
+// Represents a network interface
+message NetworkInterface {
+ // MAC address
+ string mac = 1 [(gogoproto.jsontag) = "mac"];
+ // List of IPV6 addresses
+ repeated Address ipv6 = 2 [(gogoproto.jsontag) = "ipv6"];
+ // List of IPV4 addresses
+ repeated Address ipv4 = 3 [(gogoproto.jsontag) = "ipv4"];
+ // Name of network interface
+ string name = 4 [(gogoproto.jsontag) = "name"];
+}
+
+// Represents an IP address
+message Address {
+ // Prefix length
+ int64 prefixlen = 1 [(gogoproto.jsontag) = "prefixlen"];
+ // Netmask
+ string netmask = 2 [(gogoproto.jsontag) = "netmask"];
+ // IP Address
+ string address = 3 [(gogoproto.jsontag) = "address"];
+}
+
+// Represents CPU information
+message CpuInfo {
+ // Model of CPU
+ string model = 1 [(gogoproto.jsontag) = "model"];
+ // Number of cores
+ int32 cores = 2 [(gogoproto.jsontag) = "cores"];
+ // CPU architecture
+ string architecture = 3 [(gogoproto.jsontag) = "architecture"];
+ // CPU clock speed in MHz
+ double mhz = 4 [(gogoproto.jsontag) = "mhz"];
+ // Hypervisor (e.g. VMWare, KVM, etc.)
+ string hypervisor = 5 [(gogoproto.jsontag) = "hypervisor"];
+ // Total number of CPUs
+ int32 cpus = 6 [(gogoproto.jsontag) = "cpus"];
+ // Type of hypervisor (e.g guest or host)
+ string virtualization = 7 [(gogoproto.jsontag) = "virtualization"];
+ // Map of caches with names as the keys and size in bytes as the values
+ map cache = 8 [(gogoproto.jsontag) = "cache"];
+}
+
+// Represents release information
+message ReleaseInfo {
+ // OS type (e.g. freebsd, linux, etc)
+ string codename = 1 [(gogoproto.jsontag) = "codename"];
+ // OS name (e.g. ubuntu, linuxmint, etc)
+ string id = 2 [(gogoproto.jsontag) = "id"];
+ // OS family (e.g. debian, rhel)
+ string name = 3 [(gogoproto.jsontag) = "name"];
+ // Version of the OS kernel
+ string version_id = 4 [(gogoproto.jsontag) = "version_id"];
+ // Version of the OS
+ string version = 5 [(gogoproto.jsontag) = "version"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.pb.go
new file mode 100644
index 000000000..ee8c82794
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.pb.go
@@ -0,0 +1,1255 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: metrics.proto
+
+package proto
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Metric type enum
+type MetricsReport_Type int32
+
+const (
+ // System metric type
+ MetricsReport_SYSTEM MetricsReport_Type = 0
+ // NGINX instance metric type
+ MetricsReport_INSTANCE MetricsReport_Type = 1
+ // Agent metric type
+ MetricsReport_AGENT MetricsReport_Type = 2
+)
+
+var MetricsReport_Type_name = map[int32]string{
+ 0: "SYSTEM",
+ 1: "INSTANCE",
+ 2: "AGENT",
+}
+
+var MetricsReport_Type_value = map[string]int32{
+ "SYSTEM": 0,
+ "INSTANCE": 1,
+ "AGENT": 2,
+}
+
+func (x MetricsReport_Type) String() string {
+ return proto.EnumName(MetricsReport_Type_name, int32(x))
+}
+
+func (MetricsReport_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_6039342a2ba47b72, []int{0, 0}
+}
+
+// Represents a metric report
+type MetricsReport struct {
+ // Provides meta information about the metrics
+ Meta *Metadata `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta"`
+ // Type of metrics
+ Type MetricsReport_Type `protobuf:"varint,2,opt,name=type,proto3,enum=f5.nginx.agent.sdk.MetricsReport_Type" json:"type"`
+ // List of stats entities
+ Data []*StatsEntity `protobuf:"bytes,3,rep,name=data,proto3" json:"data"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricsReport) Reset() { *m = MetricsReport{} }
+func (m *MetricsReport) String() string { return proto.CompactTextString(m) }
+func (*MetricsReport) ProtoMessage() {}
+func (*MetricsReport) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6039342a2ba47b72, []int{0}
+}
+func (m *MetricsReport) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MetricsReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MetricsReport.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MetricsReport) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricsReport.Merge(m, src)
+}
+func (m *MetricsReport) XXX_Size() int {
+ return m.Size()
+}
+func (m *MetricsReport) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricsReport.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricsReport proto.InternalMessageInfo
+
+func (m *MetricsReport) GetMeta() *Metadata {
+ if m != nil {
+ return m.Meta
+ }
+ return nil
+}
+
+func (m *MetricsReport) GetType() MetricsReport_Type {
+ if m != nil {
+ return m.Type
+ }
+ return MetricsReport_SYSTEM
+}
+
+func (m *MetricsReport) GetData() []*StatsEntity {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+// Represents a simple metric
+type SimpleMetric struct {
+ // Metric name
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Metric value
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SimpleMetric) Reset() { *m = SimpleMetric{} }
+func (m *SimpleMetric) String() string { return proto.CompactTextString(m) }
+func (*SimpleMetric) ProtoMessage() {}
+func (*SimpleMetric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6039342a2ba47b72, []int{1}
+}
+func (m *SimpleMetric) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SimpleMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SimpleMetric.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SimpleMetric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SimpleMetric.Merge(m, src)
+}
+func (m *SimpleMetric) XXX_Size() int {
+ return m.Size()
+}
+func (m *SimpleMetric) XXX_DiscardUnknown() {
+ xxx_messageInfo_SimpleMetric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SimpleMetric proto.InternalMessageInfo
+
+func (m *SimpleMetric) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *SimpleMetric) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Represents a dimension which is a dimensional attribute used when classifying and categorizing data
+type Dimension struct {
+ // Dimension name
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Dimension value
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Dimension) Reset() { *m = Dimension{} }
+func (m *Dimension) String() string { return proto.CompactTextString(m) }
+func (*Dimension) ProtoMessage() {}
+func (*Dimension) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6039342a2ba47b72, []int{2}
+}
+func (m *Dimension) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Dimension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Dimension.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Dimension) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Dimension.Merge(m, src)
+}
+func (m *Dimension) XXX_Size() int {
+ return m.Size()
+}
+func (m *Dimension) XXX_DiscardUnknown() {
+ xxx_messageInfo_Dimension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Dimension proto.InternalMessageInfo
+
+func (m *Dimension) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Dimension) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// Represents a stats entity which is a timestamped entry for dimensions and metrics
+type StatsEntity struct {
+ // Timestamp defines the time of stats entity creation
+ Timestamp *types.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // List of dimensions
+ Dimensions []*Dimension `protobuf:"bytes,3,rep,name=dimensions,proto3" json:"dimensions"`
+ // List of metrics
+ Simplemetrics []*SimpleMetric `protobuf:"bytes,4,rep,name=simplemetrics,proto3" json:"simplemetrics"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StatsEntity) Reset() { *m = StatsEntity{} }
+func (m *StatsEntity) String() string { return proto.CompactTextString(m) }
+func (*StatsEntity) ProtoMessage() {}
+func (*StatsEntity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6039342a2ba47b72, []int{3}
+}
+func (m *StatsEntity) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatsEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_StatsEntity.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *StatsEntity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatsEntity.Merge(m, src)
+}
+func (m *StatsEntity) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatsEntity) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatsEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatsEntity proto.InternalMessageInfo
+
+func (m *StatsEntity) GetTimestamp() *types.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *StatsEntity) GetDimensions() []*Dimension {
+ if m != nil {
+ return m.Dimensions
+ }
+ return nil
+}
+
+func (m *StatsEntity) GetSimplemetrics() []*SimpleMetric {
+ if m != nil {
+ return m.Simplemetrics
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.MetricsReport_Type", MetricsReport_Type_name, MetricsReport_Type_value)
+ proto.RegisterType((*MetricsReport)(nil), "f5.nginx.agent.sdk.MetricsReport")
+ proto.RegisterType((*SimpleMetric)(nil), "f5.nginx.agent.sdk.SimpleMetric")
+ proto.RegisterType((*Dimension)(nil), "f5.nginx.agent.sdk.Dimension")
+ proto.RegisterType((*StatsEntity)(nil), "f5.nginx.agent.sdk.StatsEntity")
+}
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
+
+var fileDescriptor_6039342a2ba47b72 = []byte{
+ // 460 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc6, 0x6d, 0x36, 0x9a, 0xd7, 0x76, 0x2a, 0x3e, 0x55, 0xd5, 0x68, 0xaa, 0x1e, 0x50, 0x11,
+ 0x92, 0x23, 0x15, 0x21, 0x10, 0x88, 0xc3, 0xc2, 0x2a, 0xc4, 0xa4, 0xf4, 0xe0, 0xe4, 0x32, 0x6e,
+ 0x5e, 0xeb, 0x85, 0x68, 0x75, 0x1c, 0xd5, 0xee, 0x44, 0x6f, 0xfc, 0x3c, 0x8e, 0xfc, 0x82, 0x0a,
+ 0xf5, 0xd8, 0xff, 0x80, 0x84, 0x62, 0xa7, 0x5b, 0x27, 0xc2, 0x61, 0x97, 0x67, 0x3f, 0xeb, 0x7d,
+ 0xdf, 0x7b, 0xef, 0xfb, 0x0c, 0x6d, 0xc1, 0xf5, 0x32, 0x9d, 0x29, 0x92, 0x2f, 0xa5, 0x96, 0x18,
+ 0x5f, 0xbf, 0x21, 0x59, 0x92, 0x66, 0xdf, 0x09, 0x4b, 0x78, 0xa6, 0x89, 0x9a, 0xdf, 0xf4, 0x5a,
+ 0x33, 0x29, 0x84, 0xcc, 0x6c, 0x45, 0x0f, 0x12, 0x99, 0xc8, 0xf2, 0xee, 0x25, 0x52, 0x26, 0x0b,
+ 0xee, 0x9b, 0xec, 0x6a, 0x75, 0xed, 0xeb, 0x54, 0x70, 0xa5, 0x99, 0xc8, 0x6d, 0xc1, 0xf0, 0x0f,
+ 0x82, 0x76, 0x68, 0x1b, 0x50, 0x9e, 0xcb, 0xa5, 0xc6, 0xef, 0xc1, 0x11, 0x5c, 0xb3, 0x2e, 0x1a,
+ 0xa0, 0x51, 0x73, 0x7c, 0x4a, 0xfe, 0xed, 0x47, 0x42, 0xae, 0xd9, 0x9c, 0x69, 0x16, 0x34, 0x76,
+ 0x1b, 0xcf, 0x54, 0x53, 0x13, 0xf1, 0x39, 0x38, 0x7a, 0x9d, 0xf3, 0x6e, 0x6d, 0x80, 0x46, 0x27,
+ 0xe3, 0x17, 0xff, 0xc1, 0xde, 0x37, 0x23, 0xf1, 0x3a, 0xe7, 0x96, 0xa5, 0xc0, 0x51, 0x13, 0xf1,
+ 0x47, 0x70, 0x0a, 0xf6, 0x6e, 0x7d, 0x50, 0x1f, 0x35, 0xc7, 0x5e, 0x15, 0x4b, 0xa4, 0x99, 0x56,
+ 0x93, 0x4c, 0xa7, 0x7a, 0x6d, 0xe1, 0x05, 0x80, 0x9a, 0x38, 0x7c, 0x05, 0x4e, 0x41, 0x8b, 0x01,
+ 0x8e, 0xa3, 0xcb, 0x28, 0x9e, 0x84, 0x9d, 0x27, 0xb8, 0x05, 0x8d, 0x2f, 0xd3, 0x28, 0x3e, 0x9b,
+ 0x7e, 0x9a, 0x74, 0x10, 0x76, 0xe1, 0xe8, 0xec, 0xf3, 0x64, 0x1a, 0x77, 0x6a, 0xc3, 0x10, 0x5a,
+ 0x51, 0x2a, 0xf2, 0x05, 0xb7, 0x73, 0xe1, 0x53, 0x70, 0x32, 0x26, 0xb8, 0xd9, 0xde, 0xb5, 0xd4,
+ 0x45, 0x4e, 0x4d, 0xc4, 0x1e, 0x1c, 0xdd, 0xb2, 0xc5, 0xca, 0x2e, 0x88, 0x02, 0x77, 0xb7, 0xf1,
+ 0xec, 0x03, 0xb5, 0xc7, 0xf0, 0x02, 0xdc, 0xf3, 0x54, 0xf0, 0x4c, 0xa5, 0x32, 0x7b, 0x0c, 0x97,
+ 0x5b, 0xc1, 0xf5, 0xa3, 0x06, 0xcd, 0x83, 0x3d, 0xf1, 0x3b, 0x70, 0xef, 0xdc, 0x2b, 0xdd, 0xe9,
+ 0x11, 0xeb, 0x2f, 0xd9, 0xfb, 0x4b, 0xe2, 0x7d, 0x05, 0xbd, 0x2f, 0xc6, 0x21, 0xc0, 0x7c, 0x3f,
+ 0x95, 0x2a, 0x65, 0x7d, 0x5e, 0x25, 0xeb, 0xdd, 0xec, 0xc1, 0xc9, 0x6e, 0xe3, 0x1d, 0x80, 0xe8,
+ 0xc1, 0x1d, 0x5f, 0x42, 0x5b, 0x19, 0xcd, 0xca, 0x9f, 0xd9, 0x75, 0x0c, 0xe3, 0xa0, 0xd2, 0xa8,
+ 0x03, 0x71, 0x83, 0x67, 0xbb, 0x8d, 0xf7, 0x10, 0x4a, 0x1f, 0xa6, 0x17, 0x4e, 0xa3, 0xd6, 0xa9,
+ 0xd3, 0xa7, 0x65, 0x1a, 0xbc, 0xfd, 0xb9, 0xed, 0xa3, 0x5f, 0xdb, 0x3e, 0xfa, 0xbd, 0xed, 0xa3,
+ 0xaf, 0x2f, 0x93, 0x54, 0x7f, 0x5b, 0x5d, 0x91, 0x99, 0x14, 0xbe, 0xe9, 0xe3, 0x9b, 0x3e, 0xbe,
+ 0x9a, 0xdf, 0xf8, 0xb7, 0x63, 0xfb, 0xc7, 0x3f, 0x58, 0x25, 0x8e, 0xcd, 0xf1, 0xfa, 0x6f, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xe6, 0xcd, 0x1e, 0xb5, 0x3d, 0x03, 0x00, 0x00,
+}
+
+func (m *MetricsReport) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MetricsReport) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetricsReport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Data) > 0 {
+ for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Data[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Type != 0 {
+ i = encodeVarintMetrics(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Meta != nil {
+ {
+ size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SimpleMetric) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SimpleMetric) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SimpleMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x11
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Dimension) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Dimension) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Dimension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatsEntity) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatsEntity) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatsEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Simplemetrics) > 0 {
+ for iNdEx := len(m.Simplemetrics) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Simplemetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Dimensions) > 0 {
+ for iNdEx := len(m.Dimensions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Dimensions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Timestamp != nil {
+ {
+ size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
+ offset -= sovMetrics(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MetricsReport) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Meta != nil {
+ l = m.Meta.Size()
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ if m.Type != 0 {
+ n += 1 + sovMetrics(uint64(m.Type))
+ }
+ if len(m.Data) > 0 {
+ for _, e := range m.Data {
+ l = e.Size()
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SimpleMetric) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Dimension) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *StatsEntity) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Timestamp != nil {
+ l = m.Timestamp.Size()
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ if len(m.Dimensions) > 0 {
+ for _, e := range m.Dimensions {
+ l = e.Size()
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ }
+ if len(m.Simplemetrics) > 0 {
+ for _, e := range m.Simplemetrics {
+ l = e.Size()
+ n += 1 + l + sovMetrics(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovMetrics(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozMetrics(x uint64) (n int) {
+ return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *MetricsReport) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MetricsReport: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MetricsReport: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Meta == nil {
+ m.Meta = &Metadata{}
+ }
+ if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= MetricsReport_Type(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data, &StatsEntity{})
+ if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMetrics(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SimpleMetric) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SimpleMetric: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SimpleMetric: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMetrics(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Dimension) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Dimension: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Dimension: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMetrics(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatsEntity) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatsEntity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatsEntity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Timestamp == nil {
+ m.Timestamp = &types.Timestamp{}
+ }
+ if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dimensions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Dimensions = append(m.Dimensions, &Dimension{})
+ if err := m.Dimensions[len(m.Dimensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Simplemetrics", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Simplemetrics = append(m.Simplemetrics, &SimpleMetric{})
+ if err := m.Simplemetrics[len(m.Simplemetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMetrics(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMetrics
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipMetrics(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMetrics
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthMetrics
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupMetrics
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthMetrics
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.proto
new file mode 100644
index 000000000..e4cba46fd
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.proto
@@ -0,0 +1,56 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "common.proto";
+import "gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents a metric report
+message MetricsReport {
+ // Metric type enum
+ enum Type {
+ // System metric type
+ SYSTEM = 0;
+ // NGINX instance metric type
+ INSTANCE = 1;
+ // Agent metric type
+ AGENT = 2;
+ }
+ // Provides meta information about the metrics
+ Metadata meta = 1 [(gogoproto.jsontag) = "meta"];
+ // Type of metrics
+ Type type = 2 [(gogoproto.jsontag) = "type"];
+ // List of stats entities
+ repeated StatsEntity data = 3 [(gogoproto.jsontag) = "data"];
+}
+
+// Represents a simple metric
+message SimpleMetric {
+ // Metric name
+ string name = 1 [(gogoproto.jsontag) = "name"];
+ // Metric value
+ double value = 2 [(gogoproto.jsontag) = "value"];
+}
+
+// Represents a dimension which is a dimensional attribute used when classifying and categorizing data
+message Dimension {
+ // Dimension name
+ string name = 1 [(gogoproto.jsontag) = "name"];
+ // Dimension value
+ string value = 2 [(gogoproto.jsontag) = "value"];
+}
+
+// Represents a stats entity which is a timestamped entry for dimensions and metrics
+message StatsEntity {
+ // Timestamp defines the time of stats entity creation
+ google.protobuf.Timestamp timestamp = 1;
+ // DEPRECATED
+ reserved 2;
+ reserved "metrics";
+ // List of dimensions
+ repeated Dimension dimensions = 3 [(gogoproto.jsontag) = "dimensions"];
+ // List of metrics
+ repeated SimpleMetric simplemetrics = 4 [(gogoproto.jsontag) = "simplemetrics"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.pb.go
new file mode 100644
index 000000000..781af9dd3
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: metrics.svc.proto
+
+package proto
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ types "github.com/gogo/protobuf/types"
+ events "github.com/nginx/agent/sdk/v2/proto/events"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func init() { proto.RegisterFile("metrics.svc.proto", fileDescriptor_ece8a4321458910f) }
+
+var fileDescriptor_ece8a4321458910f = []byte{
+ // 228 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4d, 0x2d, 0x29,
+ 0xca, 0x4c, 0x2e, 0xd6, 0x2b, 0x2e, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a,
+ 0x33, 0xd5, 0xcb, 0x4b, 0xcf, 0xcc, 0xab, 0xd0, 0x4b, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x2b, 0x4e,
+ 0xc9, 0x96, 0x12, 0x4a, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0xd6, 0x07, 0x53, 0x10, 0x75, 0x52, 0xd2,
+ 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0x5e, 0x52, 0x69, 0x9a, 0x7e, 0x6a, 0x6e, 0x41,
+ 0x49, 0x25, 0x54, 0x92, 0x17, 0x66, 0x2e, 0x98, 0x6b, 0xb4, 0x96, 0x91, 0x8b, 0xcf, 0x17, 0x22,
+ 0x12, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, 0xe4, 0xce, 0xc5, 0x16, 0x5c, 0x52, 0x94, 0x9a,
+ 0x98, 0x2b, 0xa4, 0xa8, 0x87, 0x69, 0xa3, 0x1e, 0x54, 0x75, 0x50, 0x6a, 0x41, 0x7e, 0x51, 0x89,
+ 0x94, 0x98, 0x1e, 0xc4, 0x32, 0x3d, 0x98, 0x65, 0x7a, 0xae, 0x20, 0xcb, 0x94, 0x18, 0x34, 0x18,
+ 0x85, 0x82, 0xb8, 0x78, 0x20, 0x06, 0xb9, 0x82, 0xdd, 0x28, 0xa4, 0x86, 0xcd, 0x38, 0x88, 0xfb,
+ 0xf5, 0xc0, 0x4a, 0x08, 0x9b, 0xe9, 0x64, 0x7e, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c,
+ 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0x69, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7,
+ 0xea, 0x83, 0x0d, 0xd6, 0x07, 0x1b, 0xac, 0x5f, 0x9c, 0x92, 0xad, 0x5f, 0x66, 0x04, 0x09, 0x00,
+ 0x6b, 0x88, 0x29, 0x6c, 0x60, 0xca, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x70, 0x1f, 0xc3, 0xc1,
+ 0x58, 0x01, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+ // A client-to-server streaming RPC to deliver high volume metrics reports.
+ Stream(ctx context.Context, opts ...grpc.CallOption) (MetricsService_StreamClient, error)
+ // A client-to-server streaming RPC to deliver high volume event reports.
+ StreamEvents(ctx context.Context, opts ...grpc.CallOption) (MetricsService_StreamEventsClient, error)
+}
+
+type metricsServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
+ return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Stream(ctx context.Context, opts ...grpc.CallOption) (MetricsService_StreamClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/f5.nginx.agent.sdk.MetricsService/Stream", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &metricsServiceStreamClient{stream}
+ return x, nil
+}
+
+type MetricsService_StreamClient interface {
+ Send(*MetricsReport) error
+ CloseAndRecv() (*types.Empty, error)
+ grpc.ClientStream
+}
+
+type metricsServiceStreamClient struct {
+ grpc.ClientStream
+}
+
+func (x *metricsServiceStreamClient) Send(m *MetricsReport) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *metricsServiceStreamClient) CloseAndRecv() (*types.Empty, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(types.Empty)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *metricsServiceClient) StreamEvents(ctx context.Context, opts ...grpc.CallOption) (MetricsService_StreamEventsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[1], "/f5.nginx.agent.sdk.MetricsService/StreamEvents", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &metricsServiceStreamEventsClient{stream}
+ return x, nil
+}
+
+type MetricsService_StreamEventsClient interface {
+ Send(*events.EventReport) error
+ CloseAndRecv() (*types.Empty, error)
+ grpc.ClientStream
+}
+
+type metricsServiceStreamEventsClient struct {
+ grpc.ClientStream
+}
+
+func (x *metricsServiceStreamEventsClient) Send(m *events.EventReport) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *metricsServiceStreamEventsClient) CloseAndRecv() (*types.Empty, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(types.Empty)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+type MetricsServiceServer interface {
+ // A client-to-server streaming RPC to deliver high volume metrics reports.
+ Stream(MetricsService_StreamServer) error
+ // A client-to-server streaming RPC to deliver high volume event reports.
+ StreamEvents(MetricsService_StreamEventsServer) error
+}
+
+// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct {
+}
+
+func (*UnimplementedMetricsServiceServer) Stream(srv MetricsService_StreamServer) error {
+ return status.Errorf(codes.Unimplemented, "method Stream not implemented")
+}
+func (*UnimplementedMetricsServiceServer) StreamEvents(srv MetricsService_StreamEventsServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamEvents not implemented")
+}
+
+func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
+ s.RegisterService(&_MetricsService_serviceDesc, srv)
+}
+
+func _MetricsService_Stream_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(MetricsServiceServer).Stream(&metricsServiceStreamServer{stream})
+}
+
+type MetricsService_StreamServer interface {
+ SendAndClose(*types.Empty) error
+ Recv() (*MetricsReport, error)
+ grpc.ServerStream
+}
+
+type metricsServiceStreamServer struct {
+ grpc.ServerStream
+}
+
+func (x *metricsServiceStreamServer) SendAndClose(m *types.Empty) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *metricsServiceStreamServer) Recv() (*MetricsReport, error) {
+ m := new(MetricsReport)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _MetricsService_StreamEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(MetricsServiceServer).StreamEvents(&metricsServiceStreamEventsServer{stream})
+}
+
+type MetricsService_StreamEventsServer interface {
+ SendAndClose(*types.Empty) error
+ Recv() (*events.EventReport, error)
+ grpc.ServerStream
+}
+
+type metricsServiceStreamEventsServer struct {
+ grpc.ServerStream
+}
+
+func (x *metricsServiceStreamEventsServer) SendAndClose(m *types.Empty) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *metricsServiceStreamEventsServer) Recv() (*events.EventReport, error) {
+ m := new(events.EventReport)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _MetricsService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "f5.nginx.agent.sdk.MetricsService",
+ HandlerType: (*MetricsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Stream",
+ Handler: _MetricsService_Stream_Handler,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "StreamEvents",
+ Handler: _MetricsService_StreamEvents_Handler,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "metrics.svc.proto",
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.proto
new file mode 100644
index 000000000..ef22b6888
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/metrics.svc.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "events/event.proto";
+import "google/protobuf/empty.proto";
+import "metrics.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents a metrics service which is responsible for ingesting high volume metrics and events
+service MetricsService {
+ // A client-to-server streaming RPC to deliver high volume metrics reports.
+ rpc Stream(stream MetricsReport) returns (google.protobuf.Empty) {}
+
+ // A client-to-server streaming RPC to deliver high volume event reports.
+ rpc StreamEvents(stream f5.nginx.agent.sdk.events.EventReport) returns (google.protobuf.Empty) {}
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.pb.go
new file mode 100644
index 000000000..56526ebd1
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.pb.go
@@ -0,0 +1,815 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: nap.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Status enum
+type AppProtectWAFHealth_AppProtectWAFStatus int32
+
+const (
+ // Unknown status
+ AppProtectWAFHealth_UNKNOWN AppProtectWAFHealth_AppProtectWAFStatus = 0
+ // Active status
+ AppProtectWAFHealth_ACTIVE AppProtectWAFHealth_AppProtectWAFStatus = 1
+ // Degraded status
+ AppProtectWAFHealth_DEGRADED AppProtectWAFHealth_AppProtectWAFStatus = 2
+)
+
+var AppProtectWAFHealth_AppProtectWAFStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ACTIVE",
+ 2: "DEGRADED",
+}
+
+var AppProtectWAFHealth_AppProtectWAFStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ACTIVE": 1,
+ "DEGRADED": 2,
+}
+
+func (x AppProtectWAFHealth_AppProtectWAFStatus) String() string {
+ return proto.EnumName(AppProtectWAFHealth_AppProtectWAFStatus_name, int32(x))
+}
+
+func (AppProtectWAFHealth_AppProtectWAFStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_f34234efeae954d9, []int{1, 0}
+}
+
+// Represents App Protect WAF details
+type AppProtectWAFDetails struct {
+ // WAF version
+ WafVersion string `protobuf:"bytes,1,opt,name=waf_version,json=wafVersion,proto3" json:"waf_version"`
+ // Attack signatures version (This is being deprecated and will be removed in a future release)
+ AttackSignaturesVersion string `protobuf:"bytes,2,opt,name=attack_signatures_version,json=attackSignaturesVersion,proto3" json:"attack_signatures_version"`
+ // Threat signatures version (This is being deprecated and will be removed in a future release)
+ ThreatCampaignsVersion string `protobuf:"bytes,3,opt,name=threat_campaigns_version,json=threatCampaignsVersion,proto3" json:"threat_campaigns_version"`
+ // App Protect Health details (This is being deprecated and will be removed in a future release)
+ Health *AppProtectWAFHealth `protobuf:"bytes,4,opt,name=health,proto3" json:"health"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AppProtectWAFDetails) Reset() { *m = AppProtectWAFDetails{} }
+func (m *AppProtectWAFDetails) String() string { return proto.CompactTextString(m) }
+func (*AppProtectWAFDetails) ProtoMessage() {}
+func (*AppProtectWAFDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f34234efeae954d9, []int{0}
+}
+func (m *AppProtectWAFDetails) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AppProtectWAFDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AppProtectWAFDetails.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AppProtectWAFDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AppProtectWAFDetails.Merge(m, src)
+}
+func (m *AppProtectWAFDetails) XXX_Size() int {
+ return m.Size()
+}
+func (m *AppProtectWAFDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_AppProtectWAFDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppProtectWAFDetails proto.InternalMessageInfo
+
+func (m *AppProtectWAFDetails) GetWafVersion() string {
+ if m != nil {
+ return m.WafVersion
+ }
+ return ""
+}
+
+func (m *AppProtectWAFDetails) GetAttackSignaturesVersion() string {
+ if m != nil {
+ return m.AttackSignaturesVersion
+ }
+ return ""
+}
+
+func (m *AppProtectWAFDetails) GetThreatCampaignsVersion() string {
+ if m != nil {
+ return m.ThreatCampaignsVersion
+ }
+ return ""
+}
+
+func (m *AppProtectWAFDetails) GetHealth() *AppProtectWAFHealth {
+ if m != nil {
+ return m.Health
+ }
+ return nil
+}
+
+// Represents the health of App Protect WAF
+type AppProtectWAFHealth struct {
+ // System ID
+ SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id"`
+ // App Protect WAF status
+ AppProtectWafStatus AppProtectWAFHealth_AppProtectWAFStatus `protobuf:"varint,2,opt,name=app_protect_waf_status,json=appProtectWafStatus,proto3,enum=f5.nginx.agent.sdk.AppProtectWAFHealth_AppProtectWAFStatus" json:"app_protect_waf_status"`
+ // Provides an error message of why App Protect WAF is degraded
+ DegradedReason string `protobuf:"bytes,3,opt,name=degraded_reason,json=degradedReason,proto3" json:"degraded_reason"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AppProtectWAFHealth) Reset() { *m = AppProtectWAFHealth{} }
+func (m *AppProtectWAFHealth) String() string { return proto.CompactTextString(m) }
+func (*AppProtectWAFHealth) ProtoMessage() {}
+func (*AppProtectWAFHealth) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f34234efeae954d9, []int{1}
+}
+func (m *AppProtectWAFHealth) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AppProtectWAFHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AppProtectWAFHealth.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AppProtectWAFHealth) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AppProtectWAFHealth.Merge(m, src)
+}
+func (m *AppProtectWAFHealth) XXX_Size() int {
+ return m.Size()
+}
+func (m *AppProtectWAFHealth) XXX_DiscardUnknown() {
+ xxx_messageInfo_AppProtectWAFHealth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppProtectWAFHealth proto.InternalMessageInfo
+
+func (m *AppProtectWAFHealth) GetSystemId() string {
+ if m != nil {
+ return m.SystemId
+ }
+ return ""
+}
+
+func (m *AppProtectWAFHealth) GetAppProtectWafStatus() AppProtectWAFHealth_AppProtectWAFStatus {
+ if m != nil {
+ return m.AppProtectWafStatus
+ }
+ return AppProtectWAFHealth_UNKNOWN
+}
+
+func (m *AppProtectWAFHealth) GetDegradedReason() string {
+ if m != nil {
+ return m.DegradedReason
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.AppProtectWAFHealth_AppProtectWAFStatus", AppProtectWAFHealth_AppProtectWAFStatus_name, AppProtectWAFHealth_AppProtectWAFStatus_value)
+ proto.RegisterType((*AppProtectWAFDetails)(nil), "f5.nginx.agent.sdk.AppProtectWAFDetails")
+ proto.RegisterType((*AppProtectWAFHealth)(nil), "f5.nginx.agent.sdk.AppProtectWAFHealth")
+}
+
+func init() { proto.RegisterFile("nap.proto", fileDescriptor_f34234efeae954d9) }
+
+var fileDescriptor_f34234efeae954d9 = []byte{
+ // 444 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6e, 0xd3, 0x40,
+ 0x14, 0x45, 0x99, 0x80, 0x42, 0xf3, 0x02, 0x6d, 0x34, 0x41, 0x25, 0x54, 0x10, 0x57, 0xd9, 0x50,
+ 0x58, 0xd8, 0x28, 0x08, 0xb1, 0x68, 0x37, 0x49, 0x13, 0xa0, 0xaa, 0x14, 0xd0, 0x14, 0x52, 0xc1,
+ 0xc6, 0x7a, 0x8d, 0x27, 0x63, 0x2b, 0x8d, 0x3d, 0xf2, 0x4c, 0x5a, 0xf8, 0x07, 0x3e, 0xac, 0x0b,
+ 0x16, 0x7c, 0x81, 0x85, 0xb2, 0xf4, 0x57, 0x20, 0xc6, 0x76, 0x42, 0x03, 0x91, 0xba, 0xf1, 0x8c,
+ 0xdf, 0xbd, 0xf7, 0xc8, 0xef, 0xca, 0x50, 0x09, 0x51, 0xda, 0x32, 0x8e, 0x74, 0x44, 0xe9, 0xf8,
+ 0x95, 0x1d, 0x8a, 0x20, 0xfc, 0x6a, 0xa3, 0xe0, 0xa1, 0xb6, 0x95, 0x37, 0xd9, 0x01, 0x11, 0x89,
+ 0x28, 0xd3, 0x5b, 0x57, 0x25, 0x78, 0xd0, 0x91, 0xf2, 0x43, 0x1c, 0x69, 0x3e, 0xd2, 0xa7, 0x9d,
+ 0x37, 0x3d, 0xae, 0x31, 0x38, 0x57, 0xf4, 0x05, 0x54, 0x2f, 0x71, 0xec, 0x5e, 0xf0, 0x58, 0x05,
+ 0x51, 0xd8, 0x20, 0xbb, 0x64, 0xaf, 0xd2, 0xdd, 0x4a, 0x13, 0xeb, 0xef, 0x31, 0x83, 0x4b, 0x1c,
+ 0x0f, 0xb3, 0x3b, 0xfd, 0x0c, 0x8f, 0x50, 0x6b, 0x1c, 0x4d, 0x5c, 0x15, 0x88, 0x10, 0xf5, 0x2c,
+ 0xe6, 0x6a, 0x91, 0x2f, 0x99, 0xfc, 0x93, 0x34, 0xb1, 0xd6, 0x9b, 0xd8, 0xc3, 0x4c, 0x3a, 0x59,
+ 0x28, 0x05, 0x7a, 0x08, 0x0d, 0xed, 0xc7, 0x1c, 0xb5, 0x3b, 0xc2, 0xa9, 0xc4, 0x40, 0x84, 0x4b,
+ 0xf2, 0x6d, 0x43, 0x7e, 0x9c, 0x26, 0xd6, 0x5a, 0x0f, 0xdb, 0xce, 0x94, 0xc3, 0x42, 0x28, 0xb8,
+ 0xc7, 0x50, 0xf6, 0x39, 0x9e, 0x6b, 0xbf, 0x71, 0x67, 0x97, 0xec, 0x55, 0xdb, 0x4f, 0xed, 0x7f,
+ 0xeb, 0xb2, 0xaf, 0xd5, 0xf3, 0xce, 0xd8, 0xbb, 0x90, 0x26, 0x56, 0x1e, 0x65, 0xf9, 0xd9, 0xfa,
+ 0x51, 0x82, 0xfa, 0x7f, 0xbc, 0xf4, 0x39, 0x54, 0xd4, 0x37, 0xa5, 0xf9, 0xd4, 0x0d, 0xbc, 0xbc,
+ 0xc7, 0xfb, 0x69, 0x62, 0x2d, 0x87, 0x6c, 0x23, 0xbb, 0x1e, 0x79, 0xf4, 0x3b, 0x81, 0x6d, 0x94,
+ 0xd2, 0x95, 0x19, 0xc4, 0xfd, 0xd3, 0xb5, 0xd2, 0xa8, 0x67, 0xca, 0x34, 0xb8, 0xd9, 0xde, 0xbf,
+ 0xe1, 0x17, 0x5e, 0x9f, 0x9d, 0x18, 0x44, 0x77, 0x27, 0x4d, 0xac, 0x35, 0x78, 0x56, 0xc7, 0x65,
+ 0x00, 0xc7, 0x59, 0x80, 0x1e, 0xc0, 0x96, 0xc7, 0x45, 0x8c, 0x1e, 0xf7, 0xdc, 0x98, 0xa3, 0x5a,
+ 0xd4, 0x5d, 0x4f, 0x13, 0x6b, 0x55, 0x62, 0x9b, 0xc5, 0x80, 0x99, 0xf7, 0xd6, 0xc1, 0x4a, 0x1f,
+ 0x39, 0xb4, 0x0a, 0x77, 0x3f, 0x0d, 0x8e, 0x07, 0xef, 0x4f, 0x07, 0xb5, 0x5b, 0x14, 0xa0, 0xdc,
+ 0x39, 0xfc, 0x78, 0x34, 0xec, 0xd7, 0x08, 0xbd, 0x07, 0x1b, 0xbd, 0xfe, 0x5b, 0xd6, 0xe9, 0xf5,
+ 0x7b, 0xb5, 0x52, 0xf7, 0xf5, 0xd5, 0xbc, 0x49, 0x7e, 0xce, 0x9b, 0xe4, 0xd7, 0xbc, 0x49, 0xbe,
+ 0x3c, 0x13, 0x81, 0xf6, 0x67, 0x67, 0xf6, 0x28, 0x9a, 0x3a, 0x66, 0x7d, 0xc7, 0xac, 0xef, 0x28,
+ 0x6f, 0xe2, 0x5c, 0xb4, 0x1d, 0xf3, 0x33, 0xef, 0x9b, 0xe7, 0x59, 0xd9, 0x1c, 0x2f, 0x7f, 0x07,
+ 0x00, 0x00, 0xff, 0xff, 0x7c, 0xe8, 0x91, 0x9a, 0x06, 0x03, 0x00, 0x00,
+}
+
+func (m *AppProtectWAFDetails) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AppProtectWAFDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AppProtectWAFDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Health != nil {
+ {
+ size, err := m.Health.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNap(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ThreatCampaignsVersion) > 0 {
+ i -= len(m.ThreatCampaignsVersion)
+ copy(dAtA[i:], m.ThreatCampaignsVersion)
+ i = encodeVarintNap(dAtA, i, uint64(len(m.ThreatCampaignsVersion)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.AttackSignaturesVersion) > 0 {
+ i -= len(m.AttackSignaturesVersion)
+ copy(dAtA[i:], m.AttackSignaturesVersion)
+ i = encodeVarintNap(dAtA, i, uint64(len(m.AttackSignaturesVersion)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.WafVersion) > 0 {
+ i -= len(m.WafVersion)
+ copy(dAtA[i:], m.WafVersion)
+ i = encodeVarintNap(dAtA, i, uint64(len(m.WafVersion)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AppProtectWAFHealth) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AppProtectWAFHealth) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AppProtectWAFHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.DegradedReason) > 0 {
+ i -= len(m.DegradedReason)
+ copy(dAtA[i:], m.DegradedReason)
+ i = encodeVarintNap(dAtA, i, uint64(len(m.DegradedReason)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.AppProtectWafStatus != 0 {
+ i = encodeVarintNap(dAtA, i, uint64(m.AppProtectWafStatus))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.SystemId) > 0 {
+ i -= len(m.SystemId)
+ copy(dAtA[i:], m.SystemId)
+ i = encodeVarintNap(dAtA, i, uint64(len(m.SystemId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintNap(dAtA []byte, offset int, v uint64) int {
+ offset -= sovNap(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AppProtectWAFDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.WafVersion)
+ if l > 0 {
+ n += 1 + l + sovNap(uint64(l))
+ }
+ l = len(m.AttackSignaturesVersion)
+ if l > 0 {
+ n += 1 + l + sovNap(uint64(l))
+ }
+ l = len(m.ThreatCampaignsVersion)
+ if l > 0 {
+ n += 1 + l + sovNap(uint64(l))
+ }
+ if m.Health != nil {
+ l = m.Health.Size()
+ n += 1 + l + sovNap(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AppProtectWAFHealth) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SystemId)
+ if l > 0 {
+ n += 1 + l + sovNap(uint64(l))
+ }
+ if m.AppProtectWafStatus != 0 {
+ n += 1 + sovNap(uint64(m.AppProtectWafStatus))
+ }
+ l = len(m.DegradedReason)
+ if l > 0 {
+ n += 1 + l + sovNap(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovNap(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozNap(x uint64) (n int) {
+ return sovNap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *AppProtectWAFDetails) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AppProtectWAFDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AppProtectWAFDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WafVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.WafVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AttackSignaturesVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AttackSignaturesVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ThreatCampaignsVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ThreatCampaignsVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Health == nil {
+ m.Health = &AppProtectWAFHealth{}
+ }
+ if err := m.Health.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNap(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthNap
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AppProtectWAFHealth) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AppProtectWAFHealth: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AppProtectWAFHealth: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SystemId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AppProtectWafStatus", wireType)
+ }
+ m.AppProtectWafStatus = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AppProtectWafStatus |= AppProtectWAFHealth_AppProtectWAFStatus(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DegradedReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNap
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DegradedReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNap(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthNap
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipNap(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowNap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthNap
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupNap
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthNap
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthNap = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowNap = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupNap = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.proto
new file mode 100644
index 000000000..ab2348edb
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nap.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "gogo.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// Represents App Protect WAF details
+message AppProtectWAFDetails {
+ // WAF version
+ string waf_version = 1 [(gogoproto.jsontag) = "waf_version"];
+ // Attack signatures version (This is being deprecated and will be removed in a future release)
+ string attack_signatures_version = 2 [(gogoproto.jsontag) = "attack_signatures_version"];
+ // Threat signatures version (This is being deprecated and will be removed in a future release)
+ string threat_campaigns_version = 3 [(gogoproto.jsontag) = "threat_campaigns_version"];
+ // App Protect Health details (This is being deprecated and will be removed in a future release)
+ AppProtectWAFHealth health = 4 [(gogoproto.jsontag) = "health"];
+}
+
+// Represents the health of App Protect WAF
+message AppProtectWAFHealth {
+ // Status enum
+ enum AppProtectWAFStatus {
+ // Unknown status
+ UNKNOWN = 0;
+ // Active status
+ ACTIVE = 1;
+ // Degraded status
+ DEGRADED = 2;
+ }
+ // System ID
+ string system_id = 1 [(gogoproto.jsontag) = "system_id"];
+ // App Protect WAF status
+ AppProtectWAFStatus app_protect_waf_status = 2 [(gogoproto.jsontag) = "app_protect_waf_status"];
+ // Provides an error message of why App Protect WAF is degraded
+ string degraded_reason = 3 [(gogoproto.jsontag) = "degraded_reason"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.pb.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.pb.go
new file mode 100644
index 000000000..d3d3ea688
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.pb.go
@@ -0,0 +1,3503 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: nginx.proto
+
+package proto
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// NGINX config action enum
+type NginxConfigAction int32
+
+const (
+ // Unknown action
+ NginxConfigAction_UNKNOWN NginxConfigAction = 0
+ // Apply config action
+ NginxConfigAction_APPLY NginxConfigAction = 1
+ // Test config action (This will be implemented in a future release)
+ NginxConfigAction_TEST NginxConfigAction = 2
+ // Rollback config action (This will be implemented in a future release)
+ NginxConfigAction_ROLLBACK NginxConfigAction = 3
+ // Return config action (This will be implemented in a future release)
+ NginxConfigAction_RETURN NginxConfigAction = 4
+ // Force config apply action
+ NginxConfigAction_FORCE NginxConfigAction = 5
+)
+
+var NginxConfigAction_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "APPLY",
+ 2: "TEST",
+ 3: "ROLLBACK",
+ 4: "RETURN",
+ 5: "FORCE",
+}
+
+var NginxConfigAction_value = map[string]int32{
+ "UNKNOWN": 0,
+ "APPLY": 1,
+ "TEST": 2,
+ "ROLLBACK": 3,
+ "RETURN": 4,
+ "FORCE": 5,
+}
+
+func (x NginxConfigAction) String() string {
+ return proto.EnumName(NginxConfigAction_name, int32(x))
+}
+
+func (NginxConfigAction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{0}
+}
+
+// SSL type enum
+type NginxSslMetaData_NginxSslType int32
+
+const (
+ // SSL complied with NGINX
+ NginxSslMetaData_BUILT NginxSslMetaData_NginxSslType = 0
+ // SSL not complied with NGINX
+ NginxSslMetaData_RUN NginxSslMetaData_NginxSslType = 1
+)
+
+var NginxSslMetaData_NginxSslType_name = map[int32]string{
+ 0: "BUILT",
+ 1: "RUN",
+}
+
+var NginxSslMetaData_NginxSslType_value = map[string]int32{
+ "BUILT": 0,
+ "RUN": 1,
+}
+
+func (x NginxSslMetaData_NginxSslType) String() string {
+ return proto.EnumName(NginxSslMetaData_NginxSslType_name, int32(x))
+}
+
+func (NginxSslMetaData_NginxSslType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{2, 0}
+}
+
+// NGINX status enum
+type NginxHealth_NginxStatus int32
+
+const (
+ // Unknown status
+ NginxHealth_UNKNOWN NginxHealth_NginxStatus = 0
+ // Active status
+ NginxHealth_ACTIVE NginxHealth_NginxStatus = 1
+ // Degraded status
+ NginxHealth_DEGRADED NginxHealth_NginxStatus = 2
+)
+
+var NginxHealth_NginxStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ACTIVE",
+ 2: "DEGRADED",
+}
+
+var NginxHealth_NginxStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ACTIVE": 1,
+ "DEGRADED": 2,
+}
+
+func (x NginxHealth_NginxStatus) String() string {
+ return proto.EnumName(NginxHealth_NginxStatus_name, int32(x))
+}
+
+func (NginxHealth_NginxStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{3, 0}
+}
+
+// swagger:model NginxDetails
+// Represents NGINX details about a single NGINX instance
+type NginxDetails struct {
+ // NGINX ID.
+ // Example: b636d4376dea15405589692d3c5d3869ff3a9b26b0e7bb4bb1aa7e658ace1437
+ NginxId string `protobuf:"bytes,1,opt,name=nginx_id,json=nginxId,proto3" json:"nginx_id"`
+ // NGINX version.
+ // Example: 1.23.2
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version"`
+ // Path to NGINX configuration.
+ // Example: /usr/local/nginx/conf/nginx.conf
+ ConfPath string `protobuf:"bytes,3,opt,name=conf_path,json=confPath,proto3" json:"conf_path"`
+ // Process ID of NGINX instance.
+ // Example: 8
+ ProcessId string `protobuf:"bytes,4,opt,name=process_id,json=processId,proto3" json:"process_id"`
+ // The path to the NGINX executable.
+ // Example: /usr/local/nginx/sbin/nginx
+ ProcessPath string `protobuf:"bytes,5,opt,name=process_path,json=processPath,proto3" json:"process_path"`
+ // The start time of the NGINX instance.
+ // Example: 1670429190000
+ StartTime int64 `protobuf:"varint,6,opt,name=start_time,json=startTime,proto3" json:"start_time"`
+ // Determines if the NGINX instance was built from the source code in github or not.
+ // Example: false
+ BuiltFromSource bool `protobuf:"varint,7,opt,name=built_from_source,json=builtFromSource,proto3" json:"built_from_source"`
+ // List of NGINX loadable modules.
+ // Example: []
+ LoadableModules []string `protobuf:"bytes,8,rep,name=loadable_modules,json=loadableModules,proto3" json:"loadable_modules"`
+ // List of NGINX runtime modules.
+ // Example: [ "http_stub_status_module" ]
+ RuntimeModules []string `protobuf:"bytes,9,rep,name=runtime_modules,json=runtimeModules,proto3" json:"runtime_modules"`
+ // NGINX Plus metadata.
+ Plus *NginxPlusMetaData `protobuf:"bytes,10,opt,name=plus,proto3" json:"plus"`
+ // NGINX SSL metadata.
+ Ssl *NginxSslMetaData `protobuf:"bytes,11,opt,name=ssl,proto3" json:"ssl"`
+ // Status URL.
+ // Example: http://localhost:8080/api
+ StatusUrl string `protobuf:"bytes,12,opt,name=status_url,json=statusUrl,proto3" json:"status_url"`
+ // Command line arguments that were used when the NGINX instance was started.
+ // Example: [ "", "with-http_stub_status_module" ]
+ ConfigureArgs []string `protobuf:"bytes,13,rep,name=configure_args,json=configureArgs,proto3" json:"configure_args"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxDetails) Reset() { *m = NginxDetails{} }
+func (m *NginxDetails) String() string { return proto.CompactTextString(m) }
+func (*NginxDetails) ProtoMessage() {}
+func (*NginxDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{0}
+}
+func (m *NginxDetails) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxDetails.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxDetails.Merge(m, src)
+}
+func (m *NginxDetails) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxDetails proto.InternalMessageInfo
+
+func (m *NginxDetails) GetNginxId() string {
+ if m != nil {
+ return m.NginxId
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetConfPath() string {
+ if m != nil {
+ return m.ConfPath
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetProcessId() string {
+ if m != nil {
+ return m.ProcessId
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetProcessPath() string {
+ if m != nil {
+ return m.ProcessPath
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetStartTime() int64 {
+ if m != nil {
+ return m.StartTime
+ }
+ return 0
+}
+
+func (m *NginxDetails) GetBuiltFromSource() bool {
+ if m != nil {
+ return m.BuiltFromSource
+ }
+ return false
+}
+
+func (m *NginxDetails) GetLoadableModules() []string {
+ if m != nil {
+ return m.LoadableModules
+ }
+ return nil
+}
+
+func (m *NginxDetails) GetRuntimeModules() []string {
+ if m != nil {
+ return m.RuntimeModules
+ }
+ return nil
+}
+
+func (m *NginxDetails) GetPlus() *NginxPlusMetaData {
+ if m != nil {
+ return m.Plus
+ }
+ return nil
+}
+
+func (m *NginxDetails) GetSsl() *NginxSslMetaData {
+ if m != nil {
+ return m.Ssl
+ }
+ return nil
+}
+
+func (m *NginxDetails) GetStatusUrl() string {
+ if m != nil {
+ return m.StatusUrl
+ }
+ return ""
+}
+
+func (m *NginxDetails) GetConfigureArgs() []string {
+ if m != nil {
+ return m.ConfigureArgs
+ }
+ return nil
+}
+
+// swagger:model NginxPlusMetaData
+// Represents NGINX Plus metadata
+type NginxPlusMetaData struct {
+ // Determines if its a plus instance or not.
+ // Example: true
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled"`
+ // NGINX Plus version.
+ // Example: R27
+ Release string `protobuf:"bytes,2,opt,name=release,proto3" json:"release"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxPlusMetaData) Reset() { *m = NginxPlusMetaData{} }
+func (m *NginxPlusMetaData) String() string { return proto.CompactTextString(m) }
+func (*NginxPlusMetaData) ProtoMessage() {}
+func (*NginxPlusMetaData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{1}
+}
+func (m *NginxPlusMetaData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxPlusMetaData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxPlusMetaData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxPlusMetaData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxPlusMetaData.Merge(m, src)
+}
+func (m *NginxPlusMetaData) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxPlusMetaData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxPlusMetaData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxPlusMetaData proto.InternalMessageInfo
+
+func (m *NginxPlusMetaData) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *NginxPlusMetaData) GetRelease() string {
+ if m != nil {
+ return m.Release
+ }
+ return ""
+}
+
+// swagger:model NginxSslMetaData
+// Represents NGINX SSL metadata
+type NginxSslMetaData struct {
+ // SSL Type.
+ // Example: 0
+ SslType NginxSslMetaData_NginxSslType `protobuf:"varint,1,opt,name=ssl_type,json=sslType,proto3,enum=f5.nginx.agent.sdk.NginxSslMetaData_NginxSslType" json:"ssl_type"`
+ // List of SSL information (e.g. version, type, etc).
+ // Example: null
+ Details []string `protobuf:"bytes,2,rep,name=details,proto3" json:"details"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxSslMetaData) Reset() { *m = NginxSslMetaData{} }
+func (m *NginxSslMetaData) String() string { return proto.CompactTextString(m) }
+func (*NginxSslMetaData) ProtoMessage() {}
+func (*NginxSslMetaData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{2}
+}
+func (m *NginxSslMetaData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxSslMetaData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxSslMetaData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxSslMetaData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxSslMetaData.Merge(m, src)
+}
+func (m *NginxSslMetaData) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxSslMetaData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxSslMetaData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxSslMetaData proto.InternalMessageInfo
+
+func (m *NginxSslMetaData) GetSslType() NginxSslMetaData_NginxSslType {
+ if m != nil {
+ return m.SslType
+ }
+ return NginxSslMetaData_BUILT
+}
+
+func (m *NginxSslMetaData) GetDetails() []string {
+ if m != nil {
+ return m.Details
+ }
+ return nil
+}
+
+// Represents the health of a NGINX instance
+type NginxHealth struct {
+ // NGINX ID
+ NginxId string `protobuf:"bytes,1,opt,name=nginx_id,json=nginxId,proto3" json:"nginx_id"`
+ // NGINX status
+ NginxStatus NginxHealth_NginxStatus `protobuf:"varint,2,opt,name=nginx_status,json=nginxStatus,proto3,enum=f5.nginx.agent.sdk.NginxHealth_NginxStatus" json:"nginx_status"`
+ // Provides an error message of why a NGINX instance is degraded
+ DegradedReason string `protobuf:"bytes,3,opt,name=degraded_reason,json=degradedReason,proto3" json:"degraded_reason"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxHealth) Reset() { *m = NginxHealth{} }
+func (m *NginxHealth) String() string { return proto.CompactTextString(m) }
+func (*NginxHealth) ProtoMessage() {}
+func (*NginxHealth) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{3}
+}
+func (m *NginxHealth) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxHealth.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxHealth) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxHealth.Merge(m, src)
+}
+func (m *NginxHealth) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxHealth) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxHealth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxHealth proto.InternalMessageInfo
+
+func (m *NginxHealth) GetNginxId() string {
+ if m != nil {
+ return m.NginxId
+ }
+ return ""
+}
+
+func (m *NginxHealth) GetNginxStatus() NginxHealth_NginxStatus {
+ if m != nil {
+ return m.NginxStatus
+ }
+ return NginxHealth_UNKNOWN
+}
+
+func (m *NginxHealth) GetDegradedReason() string {
+ if m != nil {
+ return m.DegradedReason
+ }
+ return ""
+}
+
+// Represents a NGINX config
+type NginxConfig struct {
+ // NGINX config action
+ Action NginxConfigAction `protobuf:"varint,1,opt,name=action,proto3,enum=f5.nginx.agent.sdk.NginxConfigAction" json:"action"`
+ // Metadata information about the configuration
+ ConfigData *ConfigDescriptor `protobuf:"bytes,2,opt,name=config_data,json=configData,proto3" json:"config_data"`
+ // Zipped file of all NGINX config files
+ Zconfig *ZippedFile `protobuf:"bytes,3,opt,name=zconfig,proto3" json:"zconfig"`
+ // Zipped file of all auxiliary files
+ Zaux *ZippedFile `protobuf:"bytes,4,opt,name=zaux,proto3" json:"zaux"`
+ // Information about all access log files
+ AccessLogs *AccessLogs `protobuf:"bytes,5,opt,name=access_logs,json=accessLogs,proto3" json:"access_logs"`
+ // Information about all error log files
+ ErrorLogs *ErrorLogs `protobuf:"bytes,6,opt,name=error_logs,json=errorLogs,proto3" json:"error_logs"`
+ // Information about all SSL certificates files
+ Ssl *SslCertificates `protobuf:"bytes,7,opt,name=ssl,proto3" json:"ssl"`
+ // Directory map of all config and aux files
+ DirectoryMap *DirectoryMap `protobuf:"bytes,8,opt,name=directory_map,json=directoryMap,proto3" json:"directory_map"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NginxConfig) Reset() { *m = NginxConfig{} }
+func (m *NginxConfig) String() string { return proto.CompactTextString(m) }
+func (*NginxConfig) ProtoMessage() {}
+func (*NginxConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{4}
+}
+func (m *NginxConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NginxConfig.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NginxConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxConfig.Merge(m, src)
+}
+func (m *NginxConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxConfig proto.InternalMessageInfo
+
+func (m *NginxConfig) GetAction() NginxConfigAction {
+ if m != nil {
+ return m.Action
+ }
+ return NginxConfigAction_UNKNOWN
+}
+
+func (m *NginxConfig) GetConfigData() *ConfigDescriptor {
+ if m != nil {
+ return m.ConfigData
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetZconfig() *ZippedFile {
+ if m != nil {
+ return m.Zconfig
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetZaux() *ZippedFile {
+ if m != nil {
+ return m.Zaux
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetAccessLogs() *AccessLogs {
+ if m != nil {
+ return m.AccessLogs
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetErrorLogs() *ErrorLogs {
+ if m != nil {
+ return m.ErrorLogs
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetSsl() *SslCertificates {
+ if m != nil {
+ return m.Ssl
+ }
+ return nil
+}
+
+func (m *NginxConfig) GetDirectoryMap() *DirectoryMap {
+ if m != nil {
+ return m.DirectoryMap
+ }
+ return nil
+}
+
+// Represents access log files
+type AccessLogs struct {
+ // List of access log files
+ AccessLog []*AccessLog `protobuf:"bytes,1,rep,name=access_log,json=accessLog,proto3" json:"access_log"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AccessLogs) Reset() { *m = AccessLogs{} }
+func (m *AccessLogs) String() string { return proto.CompactTextString(m) }
+func (*AccessLogs) ProtoMessage() {}
+func (*AccessLogs) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{5}
+}
+func (m *AccessLogs) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AccessLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AccessLogs.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AccessLogs) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AccessLogs.Merge(m, src)
+}
+func (m *AccessLogs) XXX_Size() int {
+ return m.Size()
+}
+func (m *AccessLogs) XXX_DiscardUnknown() {
+ xxx_messageInfo_AccessLogs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AccessLogs proto.InternalMessageInfo
+
+func (m *AccessLogs) GetAccessLog() []*AccessLog {
+ if m != nil {
+ return m.AccessLog
+ }
+ return nil
+}
+
+// Represents an access log file
+type AccessLog struct {
+ // Name of file
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Format of the file
+ Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format"`
+ // File Permissions
+ Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions"`
+ // Determines if the file is readable or not
+ Readable bool `protobuf:"varint,4,opt,name=readable,proto3" json:"readable"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AccessLog) Reset() { *m = AccessLog{} }
+func (m *AccessLog) String() string { return proto.CompactTextString(m) }
+func (*AccessLog) ProtoMessage() {}
+func (*AccessLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{6}
+}
+func (m *AccessLog) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AccessLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AccessLog.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AccessLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AccessLog.Merge(m, src)
+}
+func (m *AccessLog) XXX_Size() int {
+ return m.Size()
+}
+func (m *AccessLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_AccessLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AccessLog proto.InternalMessageInfo
+
+func (m *AccessLog) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AccessLog) GetFormat() string {
+ if m != nil {
+ return m.Format
+ }
+ return ""
+}
+
+func (m *AccessLog) GetPermissions() string {
+ if m != nil {
+ return m.Permissions
+ }
+ return ""
+}
+
+func (m *AccessLog) GetReadable() bool {
+ if m != nil {
+ return m.Readable
+ }
+ return false
+}
+
+// Represents error log files
+type ErrorLogs struct {
+ // List of error log files
+ ErrorLog []*ErrorLog `protobuf:"bytes,1,rep,name=error_log,json=errorLog,proto3" json:"error_log"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ErrorLogs) Reset() { *m = ErrorLogs{} }
+func (m *ErrorLogs) String() string { return proto.CompactTextString(m) }
+func (*ErrorLogs) ProtoMessage() {}
+func (*ErrorLogs) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{7}
+}
+func (m *ErrorLogs) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ErrorLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ErrorLogs.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ErrorLogs) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ErrorLogs.Merge(m, src)
+}
+func (m *ErrorLogs) XXX_Size() int {
+ return m.Size()
+}
+func (m *ErrorLogs) XXX_DiscardUnknown() {
+ xxx_messageInfo_ErrorLogs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ErrorLogs proto.InternalMessageInfo
+
+func (m *ErrorLogs) GetErrorLog() []*ErrorLog {
+ if m != nil {
+ return m.ErrorLog
+ }
+ return nil
+}
+
+// Represents an error log file
+type ErrorLog struct {
+ // Name of file
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"`
+ // Log level
+ LogLevel string `protobuf:"bytes,2,opt,name=log_level,json=logLevel,proto3" json:"log_level"`
+ // File Permissions
+ Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions"`
+ // Determines if the file is readable or not
+ Readable bool `protobuf:"varint,4,opt,name=readable,proto3" json:"readable"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ErrorLog) Reset() { *m = ErrorLog{} }
+func (m *ErrorLog) String() string { return proto.CompactTextString(m) }
+func (*ErrorLog) ProtoMessage() {}
+func (*ErrorLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_917f1a70b1fd7255, []int{8}
+}
+func (m *ErrorLog) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ErrorLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ErrorLog.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ErrorLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ErrorLog.Merge(m, src)
+}
+func (m *ErrorLog) XXX_Size() int {
+ return m.Size()
+}
+func (m *ErrorLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_ErrorLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ErrorLog proto.InternalMessageInfo
+
+func (m *ErrorLog) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ErrorLog) GetLogLevel() string {
+ if m != nil {
+ return m.LogLevel
+ }
+ return ""
+}
+
+func (m *ErrorLog) GetPermissions() string {
+ if m != nil {
+ return m.Permissions
+ }
+ return ""
+}
+
+func (m *ErrorLog) GetReadable() bool {
+ if m != nil {
+ return m.Readable
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterEnum("f5.nginx.agent.sdk.NginxConfigAction", NginxConfigAction_name, NginxConfigAction_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.NginxSslMetaData_NginxSslType", NginxSslMetaData_NginxSslType_name, NginxSslMetaData_NginxSslType_value)
+ proto.RegisterEnum("f5.nginx.agent.sdk.NginxHealth_NginxStatus", NginxHealth_NginxStatus_name, NginxHealth_NginxStatus_value)
+ proto.RegisterType((*NginxDetails)(nil), "f5.nginx.agent.sdk.NginxDetails")
+ proto.RegisterType((*NginxPlusMetaData)(nil), "f5.nginx.agent.sdk.NginxPlusMetaData")
+ proto.RegisterType((*NginxSslMetaData)(nil), "f5.nginx.agent.sdk.NginxSslMetaData")
+ proto.RegisterType((*NginxHealth)(nil), "f5.nginx.agent.sdk.NginxHealth")
+ proto.RegisterType((*NginxConfig)(nil), "f5.nginx.agent.sdk.NginxConfig")
+ proto.RegisterType((*AccessLogs)(nil), "f5.nginx.agent.sdk.AccessLogs")
+ proto.RegisterType((*AccessLog)(nil), "f5.nginx.agent.sdk.AccessLog")
+ proto.RegisterType((*ErrorLogs)(nil), "f5.nginx.agent.sdk.ErrorLogs")
+ proto.RegisterType((*ErrorLog)(nil), "f5.nginx.agent.sdk.ErrorLog")
+}
+
+func init() { proto.RegisterFile("nginx.proto", fileDescriptor_917f1a70b1fd7255) }
+
+var fileDescriptor_917f1a70b1fd7255 = []byte{
+ // 1166 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xdb, 0x46,
+ 0x10, 0x0e, 0x2d, 0xd9, 0x22, 0x47, 0xb2, 0xcd, 0x6c, 0x5b, 0x80, 0x28, 0x52, 0x53, 0x60, 0x1b,
+ 0xd4, 0x4d, 0x51, 0x1b, 0x71, 0x5a, 0x14, 0x45, 0x83, 0x06, 0x92, 0xa5, 0x34, 0x46, 0x14, 0xc7,
+ 0x58, 0xcb, 0x69, 0xeb, 0x0b, 0xbb, 0x16, 0xd7, 0x34, 0x11, 0x4a, 0x14, 0x76, 0xa9, 0x20, 0xc9,
+ 0x33, 0xf4, 0x49, 0x7a, 0xeb, 0xb1, 0x6f, 0xd0, 0x5b, 0xfb, 0x04, 0x44, 0x91, 0x23, 0x2f, 0x7d,
+ 0x85, 0x82, 0xb3, 0x4b, 0x4a, 0xfe, 0x6b, 0x72, 0xe9, 0x85, 0x9a, 0xf9, 0x76, 0xbe, 0x6f, 0x39,
+ 0xb3, 0xb3, 0x43, 0x41, 0x73, 0x12, 0x46, 0x93, 0x97, 0x5b, 0x53, 0x91, 0xa4, 0x09, 0x21, 0xa7,
+ 0x5f, 0x6d, 0x29, 0x9f, 0x85, 0x7c, 0x92, 0x6e, 0xc9, 0xe0, 0xf9, 0x87, 0xad, 0x51, 0x32, 0x1e,
+ 0x27, 0x13, 0x15, 0x51, 0x78, 0x93, 0xd3, 0x28, 0xd4, 0x1e, 0x84, 0x49, 0x98, 0x28, 0xdb, 0xfb,
+ 0x73, 0x19, 0x5a, 0xfb, 0x05, 0xb7, 0xc7, 0x53, 0x16, 0xc5, 0x92, 0x7c, 0x0a, 0x26, 0x6a, 0xf9,
+ 0x51, 0xe0, 0x18, 0x6d, 0x63, 0xd3, 0xea, 0xb6, 0xf2, 0xcc, 0xad, 0x30, 0xda, 0x40, 0x6b, 0x2f,
+ 0x20, 0xb7, 0xa1, 0xf1, 0x82, 0x0b, 0x19, 0x25, 0x13, 0x67, 0x09, 0xe3, 0x9a, 0x79, 0xe6, 0x96,
+ 0x10, 0x2d, 0x0d, 0x72, 0x07, 0xac, 0x62, 0x73, 0x7f, 0xca, 0xd2, 0x33, 0xa7, 0x86, 0x81, 0xab,
+ 0x79, 0xe6, 0xce, 0x41, 0x6a, 0x16, 0xe6, 0x01, 0x4b, 0xcf, 0xc8, 0x17, 0x00, 0x53, 0x91, 0x8c,
+ 0xb8, 0x94, 0xc5, 0xee, 0x75, 0x0c, 0x5e, 0xcb, 0x33, 0x77, 0x01, 0xa5, 0x96, 0xb6, 0xf7, 0x02,
+ 0x72, 0x0f, 0x5a, 0xe5, 0x02, 0xaa, 0x2f, 0x23, 0xc1, 0xce, 0x33, 0xf7, 0x1c, 0x4e, 0x9b, 0xda,
+ 0x2b, 0xf7, 0x90, 0x29, 0x13, 0xa9, 0x9f, 0x46, 0x63, 0xee, 0xac, 0xb4, 0x8d, 0xcd, 0x9a, 0xda,
+ 0x63, 0x8e, 0x52, 0x0b, 0xed, 0x61, 0x34, 0xe6, 0xa4, 0x03, 0x37, 0x4f, 0x66, 0x51, 0x9c, 0xfa,
+ 0xa7, 0x22, 0x19, 0xfb, 0x32, 0x99, 0x89, 0x11, 0x77, 0x1a, 0x6d, 0x63, 0xd3, 0xec, 0x7e, 0x90,
+ 0x67, 0xee, 0xe5, 0x45, 0xba, 0x8e, 0xd0, 0x43, 0x91, 0x8c, 0x0f, 0x11, 0x20, 0x0f, 0xc0, 0x8e,
+ 0x13, 0x16, 0xb0, 0x93, 0x98, 0xfb, 0xe3, 0x24, 0x98, 0xc5, 0x5c, 0x3a, 0x66, 0xbb, 0xb6, 0x69,
+ 0x75, 0xdf, 0xcf, 0x33, 0xf7, 0xd2, 0x1a, 0x5d, 0x2f, 0x91, 0x27, 0x0a, 0x20, 0xf7, 0x61, 0x5d,
+ 0xcc, 0x26, 0xc5, 0x9b, 0x55, 0x7c, 0x0b, 0xf9, 0xef, 0xe5, 0x99, 0x7b, 0x71, 0x89, 0xae, 0x69,
+ 0xa0, 0x64, 0xef, 0x42, 0x7d, 0x1a, 0xcf, 0xa4, 0x03, 0x6d, 0x63, 0xb3, 0xb9, 0x73, 0x7b, 0xeb,
+ 0x72, 0xb3, 0x6c, 0x61, 0x03, 0x1c, 0xc4, 0x33, 0xf9, 0x84, 0xa7, 0xac, 0xc7, 0x52, 0xd6, 0x35,
+ 0xf3, 0xcc, 0x45, 0x1a, 0xc5, 0x27, 0x79, 0x00, 0x35, 0x29, 0x63, 0xa7, 0x89, 0x1a, 0x9f, 0x5c,
+ 0xab, 0x71, 0x28, 0xe3, 0x4a, 0xa2, 0x91, 0x67, 0x6e, 0x41, 0xa2, 0xc5, 0x43, 0x97, 0x3d, 0x9d,
+ 0x49, 0x7f, 0x26, 0x62, 0xa7, 0x35, 0x3f, 0xda, 0x39, 0x8a, 0x65, 0x4f, 0x67, 0xf2, 0x48, 0xc4,
+ 0xe4, 0x1b, 0x58, 0x53, 0x2d, 0x3b, 0x13, 0xdc, 0x67, 0x22, 0x94, 0xce, 0x2a, 0x66, 0x4c, 0xf2,
+ 0xcc, 0xbd, 0xb0, 0x42, 0x57, 0x2b, 0xbf, 0x23, 0x42, 0xe9, 0x31, 0xb8, 0x79, 0x29, 0x9f, 0xa2,
+ 0x59, 0xf9, 0xa4, 0xa8, 0xa9, 0x6a, 0x6a, 0x53, 0x35, 0xab, 0x86, 0x68, 0x69, 0x14, 0x61, 0x82,
+ 0xc7, 0x9c, 0x49, 0xbe, 0xd8, 0xd3, 0x1a, 0xa2, 0xa5, 0xe1, 0xfd, 0x6e, 0x80, 0x7d, 0x31, 0x5f,
+ 0x72, 0x0c, 0xa6, 0x94, 0xb1, 0x9f, 0xbe, 0x9a, 0x72, 0xdc, 0x63, 0x6d, 0xe7, 0xee, 0xbb, 0xd4,
+ 0xa9, 0x02, 0x86, 0xaf, 0xa6, 0x5c, 0xdd, 0xb5, 0x52, 0x86, 0x36, 0xa4, 0x82, 0x8b, 0xf7, 0x0a,
+ 0xd4, 0xfd, 0x74, 0x96, 0xb0, 0x0e, 0xf8, 0x5e, 0x1a, 0xa2, 0xa5, 0xe1, 0x79, 0xfa, 0x2e, 0x6b,
+ 0x35, 0x62, 0xc1, 0x72, 0xf7, 0x68, 0x6f, 0x30, 0xb4, 0x6f, 0x90, 0x06, 0xd4, 0xe8, 0xd1, 0xbe,
+ 0x6d, 0x78, 0xbf, 0x2c, 0x41, 0x13, 0x83, 0x1e, 0x71, 0x16, 0xa7, 0x67, 0xef, 0x7e, 0xdf, 0x7f,
+ 0x86, 0x96, 0x02, 0xd5, 0x29, 0x61, 0x81, 0xd6, 0x76, 0x3e, 0xbf, 0x36, 0x47, 0xa5, 0xaf, 0xd3,
+ 0x43, 0x8a, 0xba, 0x9a, 0x8b, 0x22, 0x54, 0xcd, 0x31, 0xb5, 0x5c, 0xf4, 0x79, 0xc0, 0x43, 0xc1,
+ 0x02, 0x1e, 0xf8, 0x82, 0x33, 0x99, 0x4c, 0xf4, 0xc0, 0xc0, 0x3e, 0xbf, 0xb0, 0x44, 0xd7, 0x4a,
+ 0x80, 0xa2, 0xef, 0x7d, 0xa9, 0xf3, 0xd2, 0x62, 0x4d, 0x68, 0x1c, 0xed, 0x3f, 0xde, 0x7f, 0xfa,
+ 0xc3, 0xbe, 0x7d, 0x83, 0x00, 0xac, 0x74, 0x76, 0x87, 0x7b, 0xcf, 0xfa, 0xb6, 0x41, 0x5a, 0x60,
+ 0xf6, 0xfa, 0xdf, 0xd3, 0x4e, 0xaf, 0xdf, 0xb3, 0x97, 0xbc, 0x7f, 0xea, 0x9a, 0xb6, 0x8b, 0x4d,
+ 0x44, 0xf6, 0x60, 0x85, 0x8d, 0xd2, 0x62, 0xa8, 0xa9, 0x33, 0xbc, 0xfe, 0xbe, 0x28, 0x42, 0x07,
+ 0x83, 0xbb, 0x90, 0x67, 0xae, 0x26, 0x52, 0xfd, 0x4b, 0x9e, 0x41, 0x53, 0x75, 0xa6, 0x1f, 0xb0,
+ 0x94, 0x61, 0xbd, 0xae, 0xb9, 0x3b, 0x4a, 0xaa, 0xc7, 0xe5, 0x48, 0x44, 0xd3, 0x34, 0x11, 0xdd,
+ 0xf5, 0x3c, 0x73, 0x17, 0xc9, 0x14, 0x94, 0x83, 0x8d, 0xd6, 0x87, 0xc6, 0x6b, 0xe5, 0x62, 0x79,
+ 0x9a, 0x3b, 0x1b, 0x57, 0x69, 0x1e, 0x47, 0xd3, 0x29, 0x0f, 0x1e, 0x46, 0x31, 0x57, 0xcd, 0xa2,
+ 0x29, 0xb4, 0x34, 0xc8, 0x7d, 0xa8, 0xbf, 0x66, 0xb3, 0x97, 0x38, 0x66, 0xdf, 0xae, 0x81, 0x03,
+ 0xa1, 0x88, 0xa7, 0xf8, 0x24, 0x07, 0xd0, 0x64, 0x23, 0x1c, 0xb1, 0x71, 0x12, 0x4a, 0x1c, 0xbd,
+ 0xd7, 0x88, 0x74, 0x30, 0x6c, 0x90, 0x84, 0x52, 0xa5, 0xb5, 0x40, 0xa3, 0xc0, 0xaa, 0x45, 0x32,
+ 0x00, 0xe0, 0x42, 0x24, 0x42, 0x09, 0xae, 0xa0, 0xe0, 0x47, 0x57, 0x09, 0xf6, 0x8b, 0x28, 0xd4,
+ 0xc3, 0x01, 0x32, 0x27, 0x51, 0x8b, 0x97, 0x4b, 0xe4, 0x3b, 0x35, 0xb0, 0x1a, 0x28, 0xf3, 0xf1,
+ 0x55, 0x32, 0x87, 0x32, 0xde, 0xe5, 0x22, 0x8d, 0x4e, 0xa3, 0x11, 0x4b, 0xb9, 0xbc, 0x30, 0xaf,
+ 0x7e, 0x84, 0xd5, 0x20, 0x12, 0x7c, 0x94, 0x26, 0xe2, 0x95, 0x3f, 0x66, 0x53, 0xc7, 0x44, 0xa5,
+ 0xf6, 0x55, 0x4a, 0xbd, 0x32, 0xf0, 0x09, 0x9b, 0x76, 0x6f, 0xe6, 0x99, 0x7b, 0x9e, 0x4a, 0x5b,
+ 0xc1, 0x42, 0x80, 0x77, 0x0c, 0xd0, 0x39, 0x97, 0xf5, 0xbc, 0x20, 0x8e, 0xd1, 0xae, 0x5d, 0x97,
+ 0x75, 0xc5, 0x51, 0x59, 0xcf, 0x49, 0xd4, 0xaa, 0x8a, 0xe8, 0xfd, 0x6a, 0x80, 0x55, 0x05, 0x92,
+ 0x5b, 0x50, 0x9f, 0xb0, 0x31, 0xd7, 0xd7, 0x1a, 0x4f, 0xb0, 0xf0, 0x29, 0x3e, 0x89, 0x07, 0x2b,
+ 0xa7, 0x89, 0x18, 0xb3, 0x54, 0x8f, 0x3a, 0x6c, 0x61, 0x85, 0x50, 0xfd, 0x4b, 0xee, 0x42, 0x73,
+ 0xca, 0xc5, 0x38, 0x92, 0xc5, 0xa7, 0x5c, 0xea, 0xdb, 0x88, 0xa7, 0xb8, 0x00, 0xd3, 0x45, 0x87,
+ 0x6c, 0x82, 0x29, 0xb8, 0xfa, 0x7e, 0x61, 0x6b, 0x99, 0x6a, 0x9e, 0x94, 0x18, 0xad, 0x2c, 0xef,
+ 0x08, 0xac, 0xea, 0x28, 0xc9, 0x23, 0xb0, 0xaa, 0x83, 0xd4, 0x65, 0xb8, 0xf5, 0x5f, 0x87, 0xaf,
+ 0xfe, 0x44, 0x54, 0x14, 0x6a, 0x96, 0x47, 0xef, 0xfd, 0x66, 0x80, 0x59, 0x46, 0xbd, 0xa5, 0x04,
+ 0x77, 0xc0, 0x8a, 0x93, 0xd0, 0x8f, 0xf9, 0x0b, 0x1e, 0xeb, 0x2a, 0xa0, 0x6c, 0x05, 0x52, 0x33,
+ 0x4e, 0xc2, 0x41, 0x61, 0xfd, 0xaf, 0xa5, 0xb8, 0x73, 0xac, 0xbf, 0x59, 0x8b, 0x33, 0xe5, 0xfc,
+ 0x04, 0xb3, 0x60, 0xb9, 0x73, 0x70, 0x30, 0xf8, 0xc9, 0x36, 0x88, 0x09, 0xf5, 0x61, 0xff, 0x70,
+ 0x68, 0x2f, 0x15, 0xa3, 0x8c, 0x3e, 0x1d, 0x0c, 0xba, 0x9d, 0xdd, 0xc7, 0x76, 0xad, 0x18, 0x72,
+ 0xb4, 0x3f, 0x3c, 0xa2, 0xfb, 0x76, 0xbd, 0x08, 0x7f, 0xf8, 0x94, 0xee, 0xf6, 0xed, 0xe5, 0xee,
+ 0xd7, 0x7f, 0xbc, 0xd9, 0x30, 0xfe, 0x7a, 0xb3, 0x61, 0xfc, 0xfd, 0x66, 0xc3, 0x38, 0xfe, 0x2c,
+ 0x8c, 0xd2, 0xb3, 0xd9, 0xc9, 0xd6, 0x28, 0x19, 0x6f, 0x63, 0x6d, 0xb7, 0xb1, 0xb6, 0xdb, 0x32,
+ 0x78, 0xbe, 0xfd, 0x62, 0x67, 0x1b, 0xff, 0x14, 0x7e, 0x8b, 0xcf, 0x93, 0x15, 0xfc, 0xb9, 0xf7,
+ 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x80, 0x5c, 0xfb, 0x6c, 0x0a, 0x00, 0x00,
+}
+
+func (m *NginxDetails) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.ConfigureArgs) > 0 {
+ for iNdEx := len(m.ConfigureArgs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ConfigureArgs[iNdEx])
+ copy(dAtA[i:], m.ConfigureArgs[iNdEx])
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.ConfigureArgs[iNdEx])))
+ i--
+ dAtA[i] = 0x6a
+ }
+ }
+ if len(m.StatusUrl) > 0 {
+ i -= len(m.StatusUrl)
+ copy(dAtA[i:], m.StatusUrl)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.StatusUrl)))
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.Ssl != nil {
+ {
+ size, err := m.Ssl.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if m.Plus != nil {
+ {
+ size, err := m.Plus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.RuntimeModules) > 0 {
+ for iNdEx := len(m.RuntimeModules) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.RuntimeModules[iNdEx])
+ copy(dAtA[i:], m.RuntimeModules[iNdEx])
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.RuntimeModules[iNdEx])))
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if len(m.LoadableModules) > 0 {
+ for iNdEx := len(m.LoadableModules) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.LoadableModules[iNdEx])
+ copy(dAtA[i:], m.LoadableModules[iNdEx])
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.LoadableModules[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.BuiltFromSource {
+ i--
+ if m.BuiltFromSource {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.StartTime != 0 {
+ i = encodeVarintNginx(dAtA, i, uint64(m.StartTime))
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.ProcessPath) > 0 {
+ i -= len(m.ProcessPath)
+ copy(dAtA[i:], m.ProcessPath)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.ProcessPath)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.ProcessId) > 0 {
+ i -= len(m.ProcessId)
+ copy(dAtA[i:], m.ProcessId)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.ProcessId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ConfPath) > 0 {
+ i -= len(m.ConfPath)
+ copy(dAtA[i:], m.ConfPath)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.ConfPath)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.NginxId) > 0 {
+ i -= len(m.NginxId)
+ copy(dAtA[i:], m.NginxId)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.NginxId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxPlusMetaData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxPlusMetaData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxPlusMetaData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Release) > 0 {
+ i -= len(m.Release)
+ copy(dAtA[i:], m.Release)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Release)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Enabled {
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxSslMetaData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxSslMetaData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxSslMetaData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Details) > 0 {
+ for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Details[iNdEx])
+ copy(dAtA[i:], m.Details[iNdEx])
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Details[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.SslType != 0 {
+ i = encodeVarintNginx(dAtA, i, uint64(m.SslType))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxHealth) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxHealth) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.DegradedReason) > 0 {
+ i -= len(m.DegradedReason)
+ copy(dAtA[i:], m.DegradedReason)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.DegradedReason)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.NginxStatus != 0 {
+ i = encodeVarintNginx(dAtA, i, uint64(m.NginxStatus))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.NginxId) > 0 {
+ i -= len(m.NginxId)
+ copy(dAtA[i:], m.NginxId)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.NginxId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.DirectoryMap != nil {
+ {
+ size, err := m.DirectoryMap.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Ssl != nil {
+ {
+ size, err := m.Ssl.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.ErrorLogs != nil {
+ {
+ size, err := m.ErrorLogs.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.AccessLogs != nil {
+ {
+ size, err := m.AccessLogs.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Zaux != nil {
+ {
+ size, err := m.Zaux.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Zconfig != nil {
+ {
+ size, err := m.Zconfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ConfigData != nil {
+ {
+ size, err := m.ConfigData.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Action != 0 {
+ i = encodeVarintNginx(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLogs) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLogs) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AccessLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.AccessLog) > 0 {
+ for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AccessLog[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLog) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLog) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AccessLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Readable {
+ i--
+ if m.Readable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Permissions) > 0 {
+ i -= len(m.Permissions)
+ copy(dAtA[i:], m.Permissions)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Permissions)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Format) > 0 {
+ i -= len(m.Format)
+ copy(dAtA[i:], m.Format)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Format)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ErrorLogs) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ErrorLogs) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ErrorLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.ErrorLog) > 0 {
+ for iNdEx := len(m.ErrorLog) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ErrorLog[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNginx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ErrorLog) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ErrorLog) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ErrorLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Readable {
+ i--
+ if m.Readable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Permissions) > 0 {
+ i -= len(m.Permissions)
+ copy(dAtA[i:], m.Permissions)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Permissions)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.LogLevel) > 0 {
+ i -= len(m.LogLevel)
+ copy(dAtA[i:], m.LogLevel)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.LogLevel)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintNginx(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintNginx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovNginx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *NginxDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.NginxId)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.ConfPath)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.ProcessId)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.ProcessPath)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.StartTime != 0 {
+ n += 1 + sovNginx(uint64(m.StartTime))
+ }
+ if m.BuiltFromSource {
+ n += 2
+ }
+ if len(m.LoadableModules) > 0 {
+ for _, s := range m.LoadableModules {
+ l = len(s)
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if len(m.RuntimeModules) > 0 {
+ for _, s := range m.RuntimeModules {
+ l = len(s)
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if m.Plus != nil {
+ l = m.Plus.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Ssl != nil {
+ l = m.Ssl.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.StatusUrl)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if len(m.ConfigureArgs) > 0 {
+ for _, s := range m.ConfigureArgs {
+ l = len(s)
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NginxPlusMetaData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Enabled {
+ n += 2
+ }
+ l = len(m.Release)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NginxSslMetaData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.SslType != 0 {
+ n += 1 + sovNginx(uint64(m.SslType))
+ }
+ if len(m.Details) > 0 {
+ for _, s := range m.Details {
+ l = len(s)
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NginxHealth) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.NginxId)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.NginxStatus != 0 {
+ n += 1 + sovNginx(uint64(m.NginxStatus))
+ }
+ l = len(m.DegradedReason)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *NginxConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovNginx(uint64(m.Action))
+ }
+ if m.ConfigData != nil {
+ l = m.ConfigData.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Zconfig != nil {
+ l = m.Zconfig.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Zaux != nil {
+ l = m.Zaux.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.AccessLogs != nil {
+ l = m.AccessLogs.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.ErrorLogs != nil {
+ l = m.ErrorLogs.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Ssl != nil {
+ l = m.Ssl.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.DirectoryMap != nil {
+ l = m.DirectoryMap.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AccessLogs) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.AccessLog) > 0 {
+ for _, e := range m.AccessLog {
+ l = e.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AccessLog) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.Format)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.Permissions)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Readable {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ErrorLogs) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ErrorLog) > 0 {
+ for _, e := range m.ErrorLog {
+ l = e.Size()
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ErrorLog) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.LogLevel)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ l = len(m.Permissions)
+ if l > 0 {
+ n += 1 + l + sovNginx(uint64(l))
+ }
+ if m.Readable {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovNginx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozNginx(x uint64) (n int) {
+ return sovNginx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *NginxDetails) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NginxDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NginxDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NginxId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConfPath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProcessId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProcessId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProcessPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProcessPath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ m.StartTime = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartTime |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuiltFromSource", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.BuiltFromSource = bool(v != 0)
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadableModules", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LoadableModules = append(m.LoadableModules, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeModules", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RuntimeModules = append(m.RuntimeModules, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Plus == nil {
+ m.Plus = &NginxPlusMetaData{}
+ }
+ if err := m.Plus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ssl", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Ssl == nil {
+ m.Ssl = &NginxSslMetaData{}
+ }
+ if err := m.Ssl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StatusUrl", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StatusUrl = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigureArgs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConfigureArgs = append(m.ConfigureArgs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNginx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NginxPlusMetaData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NginxPlusMetaData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NginxPlusMetaData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Release", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNginx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNginx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Release = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNginx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthNginx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *NginxSslMetaData) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NginxSslMetaData: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NginxSslMetaData: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: ssl_type (varint-encoded enum).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SslType", wireType)
+			}
+			m.SslType = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SslType |= NginxSslMetaData_NginxSslType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			// Field 2: details (repeated string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Details = append(m.Details, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *NginxHealth) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NginxHealth: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NginxHealth: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: nginx_id (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NginxId", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NginxId = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: nginx_status (varint-encoded enum).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NginxStatus", wireType)
+			}
+			m.NginxStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NginxStatus |= NginxHealth_NginxStatus(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Field 3: degraded_reason (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DegradedReason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DegradedReason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *NginxConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NginxConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NginxConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: action (varint-encoded enum).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= NginxConfigAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			// Field 2: config_data (embedded ConfigDescriptor message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigData", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigData == nil {
+				m.ConfigData = &ConfigDescriptor{}
+			}
+			if err := m.ConfigData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			// Field 3: zconfig (embedded ZippedFile message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Zconfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Zconfig == nil {
+				m.Zconfig = &ZippedFile{}
+			}
+			if err := m.Zconfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			// Field 4: zaux (embedded ZippedFile message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Zaux", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Zaux == nil {
+				m.Zaux = &ZippedFile{}
+			}
+			if err := m.Zaux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			// Field 5: access_logs (embedded AccessLogs message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessLogs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AccessLogs == nil {
+				m.AccessLogs = &AccessLogs{}
+			}
+			if err := m.AccessLogs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			// Field 6: error_logs (embedded ErrorLogs message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ErrorLogs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ErrorLogs == nil {
+				m.ErrorLogs = &ErrorLogs{}
+			}
+			if err := m.ErrorLogs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			// Field 7: ssl (embedded SslCertificates message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ssl", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Ssl == nil {
+				m.Ssl = &SslCertificates{}
+			}
+			if err := m.Ssl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			// Field 8: directory_map (embedded DirectoryMap message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DirectoryMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DirectoryMap == nil {
+				m.DirectoryMap = &DirectoryMap{}
+			}
+			if err := m.DirectoryMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *AccessLogs) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AccessLogs: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AccessLogs: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: access_log (repeated AccessLog message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessLog", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AccessLog = append(m.AccessLog, &AccessLog{})
+			if err := m.AccessLog[len(m.AccessLog)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *AccessLog) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AccessLog: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AccessLog: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: name (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: format (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Format = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			// Field 3: permissions (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Permissions = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: readable (bool encoded as varint; any nonzero value is true).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Readable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Readable = bool(v != 0)
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *ErrorLogs) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ErrorLogs: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ErrorLogs: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: error_log (repeated ErrorLog message).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ErrorLog", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ErrorLog = append(m.ErrorLog, &ErrorLog{})
+			if err := m.ErrorLog[len(m.ErrorLog)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func (m *ErrorLog) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ErrorLog: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ErrorLog: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: name (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Field 2: log_level (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogLevel", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LogLevel = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			// Field 3: permissions (string, length-delimited).
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNginx
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Permissions = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			// Field 4: readable (bool encoded as varint; any nonzero value is true).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Readable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Readable = bool(v != 0)
+		default:
+			// Unknown field: skip it, preserving the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipNginx(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthNginx
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipNginx returns the number of bytes occupied by the next complete field
+// (including nested groups) at the start of dAtA, so unknown fields can be
+// skipped and preserved. It validates varints and group nesting as it goes.
+// NOTE(review): this appears to be protoc-gen-gogo generated code — prefer
+// regenerating from nginx.proto over editing the logic by hand.
+func skipNginx(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		// Decode the field key (fieldNum<<3 | wireType) as a varint.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowNginx
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint payload: advance past its continuation bytes.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			// Fixed64 payload.
+			iNdEx += 8
+		case 2:
+			// Length-delimited payload: decode the length, then skip it.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNginx
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthNginx
+			}
+			iNdEx += length
+		case 3:
+			// Start group: track nesting.
+			depth++
+		case 4:
+			// End group: must match a prior start group.
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupNginx
+			}
+			depth--
+		case 5:
+			// Fixed32 payload.
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthNginx
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+// Sentinel errors shared by the generated Unmarshal/skip helpers in this file.
+var (
+	// ErrInvalidLengthNginx reports a negative or out-of-range length prefix.
+	ErrInvalidLengthNginx = fmt.Errorf("proto: negative length found during unmarshaling")
+	// ErrIntOverflowNginx reports a varint longer than 64 bits.
+	ErrIntOverflowNginx = fmt.Errorf("proto: integer overflow")
+	// ErrUnexpectedEndOfGroupNginx reports an end-group tag with no matching start-group.
+	ErrUnexpectedEndOfGroupNginx = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.proto b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.proto
new file mode 100644
index 000000000..7ac92921b
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/proto/nginx.proto
@@ -0,0 +1,170 @@
+syntax = "proto3";
+package f5.nginx.agent.sdk;
+
+import "common.proto";
+import "config.proto";
+import "gogo.proto";
+
+option go_package = "github.com/nginx/agent/sdk/v2/proto;proto";
+
+// swagger:model NginxDetails
+// Represents NGINX details about a single NGINX instance
+message NginxDetails {
+    // NGINX ID.
+    // Example: b636d4376dea15405589692d3c5d3869ff3a9b26b0e7bb4bb1aa7e658ace1437
+    string nginx_id = 1 [(gogoproto.jsontag) = "nginx_id"];
+    // NGINX version.
+    // Example: 1.23.2
+    string version = 2 [(gogoproto.jsontag) = "version"];
+    // Path to NGINX configuration.
+    // Example: /usr/local/nginx/conf/nginx.conf
+    string conf_path = 3 [(gogoproto.jsontag) = "conf_path"];
+    // Process ID of NGINX instance.
+    // Example: 8
+    string process_id = 4 [(gogoproto.jsontag) = "process_id"];
+    // The path to the NGINX executable.
+    // Example: /usr/local/nginx/sbin/nginx
+    string process_path = 5 [(gogoproto.jsontag) = "process_path"];
+    // The start time of the NGINX instance (Unix time in milliseconds).
+    // Example: 1670429190000
+    int64 start_time = 6 [(gogoproto.jsontag) = "start_time"];
+    // Determines whether the NGINX instance was built from the source code on GitHub or not.
+    // Example: false
+    bool built_from_source = 7 [(gogoproto.jsontag) = "built_from_source"];
+    // List of NGINX loadable modules.
+    // Example: []
+    repeated string loadable_modules = 8 [(gogoproto.jsontag) = "loadable_modules"];
+    // List of NGINX runtime modules.
+    // Example: [ "http_stub_status_module" ]
+    repeated string runtime_modules = 9 [(gogoproto.jsontag) = "runtime_modules"];
+    // NGINX Plus metadata.
+    NginxPlusMetaData plus = 10 [(gogoproto.jsontag) = "plus"];
+    // NGINX SSL metadata.
+    NginxSslMetaData ssl = 11 [(gogoproto.jsontag) = "ssl"];
+    // Status URL.
+    // Example: http://localhost:8080/api
+    string status_url = 12 [(gogoproto.jsontag) = "status_url"];
+    // Command line arguments that were used when the NGINX instance was started.
+    // Example: [ "", "with-http_stub_status_module" ]
+    repeated string configure_args = 13 [(gogoproto.jsontag) = "configure_args"];
+}
+
+// swagger:model NginxPlusMetaData
+// Represents NGINX Plus metadata
+message NginxPlusMetaData {
+    // Determines whether this is an NGINX Plus instance or not.
+    // Example: true
+    bool enabled = 1 [(gogoproto.jsontag) = "enabled"];
+    // NGINX Plus release.
+    // Example: R27
+    string release = 2 [(gogoproto.jsontag) = "release"];
+}
+
+// swagger:model NginxSslMetaData
+// Represents NGINX SSL metadata
+message NginxSslMetaData {
+    // SSL type enum
+    enum NginxSslType {
+        // SSL compiled with NGINX
+        BUILT = 0;
+        // SSL not compiled with NGINX
+        RUN = 1;
+    }
+    // SSL Type.
+    // Example: 0
+    NginxSslType ssl_type = 1 [(gogoproto.jsontag) = "ssl_type"];
+    // List of SSL information (e.g. version, type, etc).
+    // Example: null
+    repeated string details = 2 [(gogoproto.jsontag) = "details"];
+}
+
+// Represents the health of a NGINX instance
+message NginxHealth {
+    // NGINX status enum
+    enum NginxStatus {
+        // Unknown status
+        UNKNOWN = 0;
+        // Active status
+        ACTIVE = 1;
+        // Degraded status
+        DEGRADED = 2;
+    }
+    // NGINX ID
+    string nginx_id = 1 [(gogoproto.jsontag) = "nginx_id"];
+    // NGINX status
+    NginxStatus nginx_status = 2 [(gogoproto.jsontag) = "nginx_status"];
+    // Provides an error message explaining why a NGINX instance is degraded
+    string degraded_reason = 3 [(gogoproto.jsontag) = "degraded_reason"];
+}
+
+// NGINX config action enum
+enum NginxConfigAction {
+    // Unknown action
+    UNKNOWN = 0;
+    // Apply config action
+    APPLY = 1;
+    // Test config action (This will be implemented in a future release)
+    TEST = 2;
+    // Rollback config action (This will be implemented in a future release)
+    ROLLBACK = 3;
+    // Return config action (This will be implemented in a future release)
+    RETURN = 4;
+    // Force config apply action
+    FORCE = 5;
+}
+
+// Represents a NGINX config
+message NginxConfig {
+    // NGINX config action
+    NginxConfigAction action = 1 [(gogoproto.jsontag) = "action"];
+    // Metadata information about the configuration
+    ConfigDescriptor config_data = 2 [(gogoproto.jsontag) = "config_data"];
+    // Zipped file of all NGINX config files
+    ZippedFile zconfig = 3 [(gogoproto.jsontag) = "zconfig"];
+    // Zipped file of all auxiliary files
+    ZippedFile zaux = 4 [(gogoproto.jsontag) = "zaux"];
+    // Information about all access log files
+    AccessLogs access_logs = 5 [(gogoproto.jsontag) = "access_logs"];
+    // Information about all error log files
+    ErrorLogs error_logs = 6 [(gogoproto.jsontag) = "error_logs"];
+    // Information about all SSL certificate files
+    SslCertificates ssl = 7 [(gogoproto.jsontag) = "ssl"];
+    // Directory map of all config and aux files
+    DirectoryMap directory_map = 8 [(gogoproto.jsontag) = "directory_map"];
+}
+
+// Represents access log files
+message AccessLogs {
+    // List of access log files
+    repeated AccessLog access_log = 1 [(gogoproto.jsontag) = "access_log"];
+}
+
+// Represents an access log file
+message AccessLog {
+    // Name of the file
+    string name = 1 [(gogoproto.jsontag) = "name"];
+    // Log format of the file
+    string format = 2 [(gogoproto.jsontag) = "format"];
+    // File permissions
+    string permissions = 3 [(gogoproto.jsontag) = "permissions"];
+    // Determines whether the file is readable or not
+    bool readable = 4 [(gogoproto.jsontag) = "readable"];
+}
+
+// Represents error log files
+message ErrorLogs {
+    // List of error log files
+    repeated ErrorLog error_log = 1 [(gogoproto.jsontag) = "error_log"];
+}
+
+// Represents an error log file
+message ErrorLog {
+    // Name of the file
+    string name = 1 [(gogoproto.jsontag) = "name"];
+    // Log level of the file
+    string log_level = 2 [(gogoproto.jsontag) = "log_level"];
+    // File permissions
+    string permissions = 3 [(gogoproto.jsontag) = "permissions"];
+    // Determines whether the file is readable or not
+    bool readable = 4 [(gogoproto.jsontag) = "readable"];
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/traverser.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/traverser.go
new file mode 100644
index 000000000..3e77e19c7
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/traverser.go
@@ -0,0 +1,99 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package sdk
+
+import (
+ "github.com/nginxinc/nginx-go-crossplane"
+)
+
+// CrossplaneTraverseCallback is invoked for each (parent, current) directive
+// pair during a traversal; returning false stops the walk, and a non-nil
+// error aborts it.
+type CrossplaneTraverseCallback = func(parent *crossplane.Directive, current *crossplane.Directive) (bool, error)
+
+// CrossplaneTraverseCallbackStr is invoked for each (parent, current)
+// directive pair; returning a non-empty string stops the walk and that
+// string becomes the traversal result.
+type CrossplaneTraverseCallbackStr = func(parent *crossplane.Directive, current *crossplane.Directive) string
+
+// traverse walks root's children depth-first, calling callback(parent, child)
+// for every directive. A callback returning false (with nil error) sets *stop
+// and halts the entire walk; a non-nil error aborts immediately.
+func traverse(root *crossplane.Directive, callback CrossplaneTraverseCallback, stop *bool) error {
+	if *stop {
+		return nil
+	}
+	for _, directive := range root.Block {
+		keepGoing, err := callback(root, directive)
+		if err != nil {
+			return err
+		}
+		if !keepGoing {
+			// Callback asked to halt: flag it so ancestor frames unwind too.
+			*stop = true
+			return nil
+		}
+		if err := traverse(directive, callback, stop); err != nil {
+			return err
+		}
+		if *stop {
+			return nil
+		}
+	}
+	return nil
+}
+
+// traverseStr walks root's children depth-first, calling
+// callback(parent, child) for every directive. The first non-empty string a
+// callback returns sets *stop and is propagated back as the result.
+func traverseStr(root *crossplane.Directive, callback CrossplaneTraverseCallbackStr, stop *bool) string {
+	if *stop {
+		return ""
+	}
+	result := ""
+	for _, directive := range root.Block {
+		if result = callback(root, directive); result != "" {
+			// Non-empty answer: flag the halt so ancestor frames unwind too.
+			*stop = true
+			return result
+		}
+		result = traverseStr(directive, callback, stop)
+		if *stop {
+			return result
+		}
+	}
+	return result
+}
+
+// CrossplaneConfigTraverse walks every directive of a parsed crossplane
+// config depth-first, invoking callback(parent, directive) for each one
+// (parent is nil for top-level directives). The walk ends early when a
+// callback returns false, and the first callback error is returned.
+func CrossplaneConfigTraverse(root *crossplane.Config, callback CrossplaneTraverseCallback) error {
+	stop := false
+	for _, directive := range root.Parsed {
+		keepGoing, err := callback(nil, directive)
+		if err != nil {
+			return err
+		}
+		if !keepGoing {
+			return nil
+		}
+		if err := traverse(directive, callback, &stop); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CrossplaneConfigTraverseStr walks every directive of a parsed crossplane
+// config depth-first, invoking callback(parent, directive) for each one
+// (parent is nil for top-level directives). It returns the first non-empty
+// string produced by a callback, or "" when no callback matches.
+func CrossplaneConfigTraverseStr(root *crossplane.Config, callback CrossplaneTraverseCallbackStr) string {
+	stop := false
+	for _, directive := range root.Parsed {
+		if response := callback(nil, directive); response != "" {
+			return response
+		}
+		if response := traverseStr(directive, callback, &stop); response != "" {
+			return response
+		}
+	}
+	return ""
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/sdk/v2/zip/zipped_file.go b/test/integration/vendor/github.com/nginx/agent/sdk/v2/zip/zipped_file.go
new file mode 100644
index 000000000..3abfd3a52
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/sdk/v2/zip/zipped_file.go
@@ -0,0 +1,226 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// Package zip provide convenience utilities to work with config files.
+package zip
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "sync"
+
+ "github.com/nginx/agent/sdk/v2/checksum"
+ "github.com/nginx/agent/sdk/v2/files"
+ "github.com/nginx/agent/sdk/v2/proto"
+)
+
+var (
+	// ErrFlushed is returned when a Writer is used after Payloads/Proto has
+	// already flushed and closed the underlying archive.
+	ErrFlushed = errors.New("zipped file: already flushed")
+)
+
+const (
+	// DefaultFileMode is the permission set recorded in the tar header for
+	// files added via AddFile.
+	DefaultFileMode = 0644
+)
+
+// Writer is a helper for building the proto ZippedFile for sending with multiple file contents.
+// Uses in memory bytes.Buffer, should be extended if larger file content is expected.
+type Writer struct {
+	sync.Mutex         // guards all fields below across concurrent Add/Payloads calls
+	flushed bool       // set once Payloads has closed the tar/gzip writers
+	prefix  string     // root directory recorded alongside the archive
+	wrote   int        // number of entries added (reported by FileLen)
+	buf     *bytes.Buffer
+	gzip    *gzip.Writer
+	writer  *tar.Writer // tar stream layered over the gzip writer
+}
+
+// Reader iterates the entries of a gzip-compressed tar archive produced by
+// Writer, remembering the archive's root-directory prefix.
+type Reader struct {
+	prefix string
+	gzip   *gzip.Reader
+	reader *tar.Reader
+}
+
+// NewWriter returns a writer used to create the ZippedFile proto. The prefix
+// (root directory recorded for the archived files) must be non-empty.
+func NewWriter(prefix string) (*Writer, error) {
+	if prefix == "" {
+		return nil, fmt.Errorf("zip prefix path can not be empty")
+	}
+	b := bytes.Buffer{}
+	gz := gzip.NewWriter(&b)
+	return &Writer{
+		prefix: prefix,
+		buf:    &b,
+		gzip:   gz,
+		writer: tar.NewWriter(gz),
+	}, nil
+}
+
+// Payloads returns the content, prefix, and checksum for the files written to
+// the writer. Closing the tar/gzip writers flushes the archive, so this can
+// succeed only once; subsequent calls return ErrFlushed.
+func (z *Writer) Payloads() ([]byte, string, string, error) {
+	z.Lock()
+	defer z.Unlock()
+	if z.flushed {
+		return nil, "", "", ErrFlushed
+	}
+	// close the writer, so it flushes to the buffer, this also means we can/should only
+	// do this once.
+	if err := z.writer.Close(); err != nil {
+		return nil, "", "", err
+	}
+	if err := z.gzip.Close(); err != nil {
+		return nil, "", "", err
+	}
+	z.flushed = true
+	content := z.content()
+	return content, z.prefix, checksum.HexChecksum(content), nil
+}
+
+func (z *Writer) Proto() (*proto.ZippedFile, error) {
+ var err error
+ p := &proto.ZippedFile{}
+ p.Contents, p.RootDirectory, p.Checksum, err = z.Payloads()
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// Add archives the contents of r under name with the given mode. The reader
+// is buffered fully in memory first because the tar header needs the exact
+// size up front. Returns ErrFlushed if the archive was already flushed.
+func (z *Writer) Add(name string, mode os.FileMode, r io.Reader) error {
+	z.Lock()
+	defer z.Unlock()
+	if z.flushed {
+		return ErrFlushed
+	}
+	b := bytes.NewBuffer([]byte{})
+	n, err := io.Copy(b, r)
+	if err != nil {
+		return fmt.Errorf("zipped file: copy error %s", err)
+	}
+	h := &tar.Header{
+		Name: name,
+		Mode: int64(mode),
+		Size: n,
+	}
+	err = z.writer.WriteHeader(h)
+	if err != nil {
+		return fmt.Errorf("zipped file: write header error %s", err)
+	}
+	_, err = z.writer.Write(b.Bytes())
+	if err != nil {
+		return fmt.Errorf("zipped file: write error %s", err)
+	}
+	// Count the entry only after it is fully written, so FileLen never
+	// reports files whose header or body failed to reach the archive.
+	z.wrote++
+	return nil
+}
+
+// AddFile opens fullPath and archives its contents under that same path,
+// recording DefaultFileMode as the entry's permissions.
+func (z *Writer) AddFile(fullPath string) error {
+	f, err := os.Open(fullPath)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = f.Close()
+	}()
+	return z.Add(fullPath, DefaultFileMode, f)
+}
+
+// FileLen reports how many entries have been added to the archive so far.
+func (z *Writer) FileLen() int {
+	return z.wrote
+}
+
+// content exposes the raw gzip'd archive bytes; only meaningful after the
+// writers have been closed by Payloads.
+func (z *Writer) content() []byte {
+	return z.buf.Bytes()
+}
+
+// NewReader returns a Reader to help with extracting the files from the provided ZippedFile proto.
+// NOTE(review): p is dereferenced without a nil check — callers must pass a non-nil proto.
+func NewReader(p *proto.ZippedFile) (*Reader, error) {
+	return NewReaderFromPayloads(p.Contents, p.RootDirectory, p.Checksum)
+}
+
+// NewReaderFromPayloads returns a Reader to help with extracting the provided
+// zipped content. The content's checksum is verified against cs first.
+func NewReaderFromPayloads(content []byte, prefix, cs string) (*Reader, error) {
+	validateChecksum := checksum.HexChecksum(content)
+
+	if validateChecksum != cs {
+		// HexChecksum already returns a hex-encoded string; formatting it with
+		// %x would hex-encode the hex digits a second time, producing an
+		// unreadable value in the error message.
+		return nil, fmt.Errorf("checksum validation failed: computed %s, expected %s", validateChecksum, cs)
+	}
+	gz, err := gzip.NewReader(bytes.NewReader(content))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Reader{
+		gzip:   gz,
+		reader: tar.NewReader(gz),
+		prefix: prefix,
+	}, nil
+}
+
+type FileReaderCallback = func(err error, path string, mode os.FileMode, r io.Reader) bool
+
+// RangeFileReaders calls callback sequentially for each file in the zip
+// archive. Iteration stops when the archive is exhausted, when an entry
+// cannot be read, or when callback returns false.
+func (r *Reader) RangeFileReaders(callback FileReaderCallback) {
+	for {
+		header, err := r.reader.Next()
+
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil || header == nil {
+			// A non-EOF error from Next is persistent: the previous
+			// implementation's `continue` on a nil header retried the same
+			// failing read forever. Stop iterating instead.
+			break
+		}
+
+		// Honor the documented contract — the old code ignored the callback's
+		// boolean result, so "return false to stop" never actually stopped.
+		if !callback(nil, header.Name, os.FileMode(header.Mode), r.reader) {
+			break
+		}
+	}
+}
+
+// Prefix returns the archive's recorded root-directory prefix.
+func (r *Reader) Prefix() string {
+	return r.prefix
+}
+
+// Close releases the underlying gzip reader; the Reader is unusable afterwards.
+func (r *Reader) Close() error {
+	return r.gzip.Close()
+}
+
+func UnPack(zipFile *proto.ZippedFile) ([]*proto.File, error) {
+ zipContentsReader, err := NewReader(zipFile)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = zipContentsReader.Close()
+ }()
+
+ rawFiles := make([]*proto.File, 0)
+ zipContentsReader.RangeFileReaders(func(err error, path string, mode os.FileMode, rc io.Reader) bool {
+ if err != nil {
+ log.Print(err)
+ }
+
+ b := bytes.NewBuffer([]byte{})
+ _, err = io.Copy(b, rc)
+
+ if err != nil {
+ return false
+ }
+
+ rawFiles = append(rawFiles, &proto.File{
+ Name: path,
+ Permissions: files.GetPermissions(mode),
+ Contents: b.Bytes(),
+ })
+ return true
+ })
+ return rawFiles, err
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/checksum.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/checksum.go
new file mode 100644
index 000000000..834fea3ef
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/checksum.go
@@ -0,0 +1,21 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "crypto/sha256"
+ "fmt"
+)
+
+// GenerateNginxID derives a stable NGINX identifier by hashing the formatted
+// arguments with SHA-256 and returning the hex-encoded digest.
+func GenerateNginxID(format string, a ...interface{}) string {
+	digest := sha256.Sum256([]byte(fmt.Sprintf(format, a...)))
+	return fmt.Sprintf("%x", digest)
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/commands.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/commands.go
new file mode 100644
index 000000000..8321b83c3
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/commands.go
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
+import (
+ "os"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// ROOT_COMMAND is the top-level cobra command for the nginx-agent binary;
+// flags, the runner, and subcommands are attached to it by this package.
+var ROOT_COMMAND = &cobra.Command{
+	Use:   "nginx-agent [flags]",
+	Short: "nginx-agent",
+}
+
+// COMPLETION_COMMAND generates a shell-completion script for bash, zsh, or
+// fish and writes it to stdout. The Long text below is runtime help output
+// shown by `nginx-agent completion --help`.
+var COMPLETION_COMMAND = &cobra.Command{
+	Use:   "completion [bash|zsh|fish]",
+	Short: "Generate completion script.",
+	Long: `To load completions:
+
+Bash:
+
+$ source <(nginx-agent completion bash)
+
+# To load completions for each session, execute once:
+Linux:
+ $ nginx-agent completion bash > /etc/bash_completion.d/nginx-agent
+MacOS:
+ $ nginx-agent completion bash > /usr/local/etc/bash_completion.d/nginx-agent
+
+Zsh:
+
+# If shell completion is not already enabled in your environment you will need
+# to enable it. You can execute the following once:
+
+$ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+# To load completions for each session, execute once:
+$ nginx-agent completion zsh > "${fpath[1]}/_nginx-agent"
+
+# You will need to start a new shell for this setup to take effect.
+
+Fish:
+
+$ nginx-agent completion fish | source
+
+# To load completions for each session, execute once:
+$ nginx-agent completion fish > ~/.config/fish/completions/nginx-agent.fish
+`,
+	DisableFlagsInUseLine: true,
+	ValidArgs:             []string{"bash", "zsh", "fish"},
+	// NOTE(review): cobra.ExactValidArgs is deprecated in newer cobra
+	// releases — confirm the pinned cobra version still provides it.
+	Args: cobra.ExactValidArgs(1),
+	Run: func(cmd *cobra.Command, args []string) {
+		var err error
+
+		switch args[0] {
+		case "bash":
+			err = cmd.Root().GenBashCompletion(os.Stdout)
+		case "zsh":
+			err = cmd.Root().GenZshCompletion(os.Stdout)
+		case "fish":
+			err = cmd.Root().GenFishCompletion(os.Stdout, true)
+		}
+
+		if err != nil {
+			log.Warnf("Error sending command: %v", err)
+		}
+	},
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/config.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/config.go
new file mode 100644
index 000000000..363c68c6d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/config.go
@@ -0,0 +1,458 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ agent_config "github.com/nginx/agent/sdk/v2/agent/config"
+ advanced_metrics "github.com/nginx/agent/v2/src/extensions/advanced-metrics/pkg/advanced-metrics"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "gopkg.in/yaml.v3"
+
+ log "github.com/sirupsen/logrus"
+ flag "github.com/spf13/pflag"
+)
+
+const (
+	// dynamicConfigUsageComment is prepended verbatim to the dynamic agent
+	// config file whenever the agent (re)writes it, so operators reading the
+	// file know which values the API / install script may overwrite.
+	dynamicConfigUsageComment = `#
+# /etc/nginx-agent/dynamic-agent.conf
+#
+# Dynamic configuration file for NGINX Agent.
+#
+# The purpose of this file is to track agent configuration
+# values that can be dynamically changed via the API and the agent install script.
+# You may edit this file, but API calls that modify the tags on this system will
+# overwrite the tag values in this file.
+#
+# The agent configuration values that API calls can modify are as follows:
+# - tags
+#
+# The agent configuration values that the agent install script can modify are as follows:
+# - instance_group
+
+`
+)
+
+var (
+	// Viper is the package-wide configuration registry, constructed with the
+	// SDK's key delimiter (agent_config.KeyDelimiter).
+	Viper = viper.NewWithOptions(viper.KeyDelimiter(agent_config.KeyDelimiter))
+)
+
+// SetVersion records "<version>-<commit>" on the root command, which cobra
+// renders for the --version flag.
+func SetVersion(version, commit string) {
+	ROOT_COMMAND.Version = fmt.Sprintf("%s-%s", version, commit)
+}
+
+// Execute attaches the completion subcommand to the root command and runs it,
+// returning whatever error cobra's Execute produces.
+func Execute() error {
+	ROOT_COMMAND.AddCommand(COMPLETION_COMMAND)
+	return ROOT_COMMAND.Execute()
+}
+
+// SetDefaults seeds Viper with the package's default values for the core
+// (always-on) configuration keys; extension defaults are registered
+// separately via SetNginxAppProtectDefaults / SetNAPMonitoringDefaults.
+func SetDefaults() {
+	// CLOUDACCOUNTID DEFAULT
+	Viper.SetDefault(CloudAccountIdKey, Defaults.CloudAccountID)
+
+	// SERVER DEFAULTS
+	Viper.SetDefault(ServerMetrics, Defaults.Server.Metrics)
+	Viper.SetDefault(ServerCommand, Defaults.Server.Command)
+
+	// DATAPLANE DEFAULTS
+	Viper.SetDefault(DataplaneStatusPoll, Defaults.Dataplane.Status.PollInterval)
+
+	// METRICS DEFAULTS
+	Viper.SetDefault(MetricsBulkSize, Defaults.AgentMetrics.BulkSize)
+	Viper.SetDefault(MetricsReportInterval, Defaults.AgentMetrics.ReportInterval)
+	Viper.SetDefault(MetricsCollectionInterval, Defaults.AgentMetrics.CollectionInterval)
+
+	// NGINX DEFAULTS
+	Viper.SetDefault(NginxClientVersion, Defaults.Nginx.NginxClientVersion)
+}
+
+// SetNginxAppProtectDefaults seeds Viper with defaults for the NGINX App
+// Protect extension keys.
+func SetNginxAppProtectDefaults() {
+	Viper.SetDefault(NginxAppProtectReportInterval, Defaults.NginxAppProtect.ReportInterval)
+	Viper.SetDefault(NginxAppProtectPrecompiledPublication, Defaults.NginxAppProtect.PrecompiledPublication)
+}
+
+// SetNAPMonitoringDefaults seeds Viper with defaults for the NAP monitoring
+// extension keys.
+func SetNAPMonitoringDefaults() {
+	Viper.SetDefault(NAPMonitoringCollectorBufferSize, Defaults.NAPMonitoring.CollectorBufferSize)
+	Viper.SetDefault(NAPMonitoringProcessorBufferSize, Defaults.NAPMonitoring.ProcessorBufferSize)
+	Viper.SetDefault(NAPMonitoringSyslogIP, Defaults.NAPMonitoring.SyslogIP)
+	Viper.SetDefault(NAPMonitoringSyslogPort, Defaults.NAPMonitoring.SyslogPort)
+	Viper.SetDefault(NAPMonitoringReportInterval, Defaults.NAPMonitoring.ReportInterval)
+	Viper.SetDefault(NAPMonitoringReportCount, Defaults.NAPMonitoring.ReportCount)
+}
+
+// setFlagDeprecated marks the named root-command flag as deprecated with the
+// given usage message, logging (not failing) if the flag cannot be marked.
+func setFlagDeprecated(name string, usageMessage string) {
+	err := ROOT_COMMAND.Flags().MarkDeprecated(name, usageMessage)
+	if err != nil {
+		log.Warnf("error occurred deprecating flag %s: %v", name, err)
+	}
+}
+
+// deprecateFlags marks every legacy CLI flag as deprecated; the messages are
+// shown to users who still pass the old flags.
+func deprecateFlags() {
+	setFlagDeprecated("api-token", "DEPRECATED. API Token is no longer set. No replacement command.")
+	setFlagDeprecated("location", "DEPRECATED. Set through APIs. No replacement command.")
+	setFlagDeprecated("metadata", "DEPRECATED. Use tags instead.")
+	setFlagDeprecated("metrics-server", "DEPRECATED. Use server instead.")
+	setFlagDeprecated("metrics-tls-ca", "DEPRECATED. metrics-tls-ca has been replaced by tls-ca")
+	setFlagDeprecated("metrics-tls-cert", "DEPRECATED. metrics-tls-cert has been replaced by tls-cert")
+	setFlagDeprecated("metrics-tls-enable", "DEPRECATED. metrics-tls-enable has been replaced by tls-enable")
+	setFlagDeprecated("metrics-tls-key", "DEPRECATED. metrics-tls-key has been replaced by tls-key")
+	setFlagDeprecated("nginx-bin-path", "DEPRECATED. nginx-bin-path is no longer used. The agent strives to discover the nginx instances on the dataplane")
+	setFlagDeprecated("nginx-metrics-poll-interval", "DEPRECATED. nginx-metrics-poll-interval has been replaced by metrics-report-interval and metrics-collection-interval")
+	setFlagDeprecated("nginx-pid-path", "DEPRECATED. nginx-pid-path is no longer used. The agent strives to discover the nginx instances on the dataplane")
+	setFlagDeprecated("nginx-plus-api", "DEPRECATED. nginx-plus-api is no longer used. The agent strives to discover the nginx instances on the dataplane and read this from the configuration file")
+	setFlagDeprecated("nginx-stub-status", "DEPRECATED. nginx-stub-status is no longer used. The agent strives to discover the nginx instances on the dataplane and read this from the configuration file")
+	setFlagDeprecated("server", "DEPRECATED. server has been replaced with the new server structure")
+	setFlagDeprecated("dataplane-sync-enable", "DEPRECATED. server has been replaced with the new server structure")
+	setFlagDeprecated("dataplane-events-enable", "DEPRECATED. server has been replaced with the new server structure")
+}
+
+// RegisterFlags registers every agent (and deprecated) flag on the root
+// command, normalizes flag-name separators, marks deprecations, and binds
+// each flag to both a Viper key (dashes mapped to underscores) and its
+// corresponding environment variable.
+func RegisterFlags() {
+	Viper.SetEnvPrefix(EnvPrefix)
+	Viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+	Viper.AutomaticEnv()
+
+	fs := ROOT_COMMAND.Flags()
+	for _, f := range append(agentFlags, deprecatedFlags...) {
+		f.register(fs)
+	}
+
+	fs.SetNormalizeFunc(wordSepNormalizeFunc)
+	deprecateFlags()
+
+	fs.VisitAll(func(flag *flag.Flag) {
+		if err := Viper.BindPFlag(strings.ReplaceAll(flag.Name, "-", "_"), fs.Lookup(flag.Name)); err != nil {
+			// Previously a failed binding returned silently; log it so the
+			// failure is visible, matching the env-binding handling below.
+			log.Warnf("error occurred binding flag %s: %v", flag.Name, err)
+			return
+		}
+		err := Viper.BindEnv(flag.Name)
+		if err != nil {
+			log.Warnf("error occurred binding env %s: %v", flag.Name, err)
+		}
+	})
+}
+
+// RegisterConfigFile locates confFileName in the given search paths, records
+// the dynamic config path in Viper, and loads both files' properties.
+// NOTE: load failures call log.Fatalf, which terminates the process.
+func RegisterConfigFile(dynamicConfFilePath string, confFileName string, confPaths ...string) (string, error) {
+	cfg, err := SeekConfigFileInPaths(confFileName, confPaths...)
+	if err != nil {
+		return cfg, err
+	}
+
+	SetDynamicConfigFileAbsPath(dynamicConfFilePath)
+	if err := LoadPropertiesFromFile(cfg); err != nil {
+		log.Fatalf("Unable to load properties from config files (%s, %s) - %v", cfg, dynamicConfFilePath, err)
+	}
+
+	return cfg, nil
+}
+
+// RegisterRunner installs r as the root command's Run function, i.e. the
+// agent's main entry point once flags are parsed.
+func RegisterRunner(r func(cmd *cobra.Command, args []string)) {
+	ROOT_COMMAND.Run = r
+}
+
+// GetConfig assembles a Config snapshot from the current Viper state, using
+// clientId as the agent's ClientID. It also derives AllowedDirectoriesMap
+// from the colon-separated ConfigDirs and the server Target from host+port.
+// The returned error is currently always nil.
+func GetConfig(clientId string) (*Config, error) {
+	config := &Config{
+		Path:                  Viper.GetString(ConfigPathKey),
+		DynamicConfigPath:     Viper.GetString(DynamicConfigPathKey),
+		ClientID:              clientId,
+		CloudAccountID:        Viper.GetString(CloudAccountIdKey),
+		Server:                getServer(),
+		AgentAPI:              getAgentAPI(),
+		ConfigDirs:            Viper.GetString(ConfigDirsKey),
+		Log:                   getLog(),
+		TLS:                   getTLS(),
+		Nginx:                 getNginx(),
+		Dataplane:             getDataplane(),
+		AgentMetrics:          getMetrics(),
+		Features:              Viper.GetStringSlice(agent_config.FeaturesKey),
+		Tags:                  Viper.GetStringSlice(TagsKey),
+		Updated:               filePathUTime(Viper.GetString(DynamicConfigPathKey)),
+		AllowedDirectoriesMap: map[string]struct{}{},
+		DisplayName:           Viper.GetString(DisplayNameKey),
+		InstanceGroup:         Viper.GetString(InstanceGroupKey),
+		NginxAppProtect:       getNginxAppProtect(),
+		NAPMonitoring:         getNAPMonitoring(),
+		AdvancedMetrics:       getAdvancedMetrics(),
+	}
+
+	// ConfigDirs is a colon-separated allow-list; empty segments are skipped.
+	for _, dir := range strings.Split(config.ConfigDirs, ":") {
+		if dir != "" {
+			config.AllowedDirectoriesMap[dir] = struct{}{}
+		}
+	}
+	config.Server.Target = fmt.Sprintf("%s:%d", config.Server.Host, config.Server.GrpcPort)
+
+	log.Tracef("%v", config)
+	return config, nil
+}
+
+// UpdateAgentConfig updates the Agent config on disk with the tags and features that are
+// passed into it. A bool is returned indicating if the Agent config was
+// overwritten or not (false when the values were already synchronized).
+func UpdateAgentConfig(systemId string, updateTags []string, updateFeatures []string) (bool, error) {
+	// Get current config on disk
+	config, err := GetConfig(systemId)
+	if err != nil {
+		log.Errorf("Failed to register config: %v", err)
+		return false, err
+	}
+
+	// Update nil valued updateTags to empty slice for comparison
+	if updateTags == nil {
+		updateTags = []string{}
+	}
+
+	if updateFeatures == nil {
+		updateFeatures = []string{}
+	}
+
+	// Sort tags and compare them
+	sort.Strings(updateTags)
+	sort.Strings(config.Tags)
+	synchronizedTags := reflect.DeepEqual(updateTags, config.Tags)
+
+	Viper.Set(TagsKey, updateTags)
+	config.Tags = Viper.GetStringSlice(TagsKey)
+
+	sort.Strings(updateFeatures)
+	sort.Strings(config.Features)
+	synchronizedFeatures := reflect.DeepEqual(updateFeatures, config.Features)
+
+	Viper.Set(agent_config.FeaturesKey, updateFeatures)
+	config.Features = Viper.GetStringSlice(agent_config.FeaturesKey)
+
+	// If the features are already synchronized there is no need to overwrite
+	if synchronizedTags && synchronizedFeatures {
+		log.Debug("Manager and Local tags and features are already synchronized")
+		return false, nil
+	}
+
+	// Get the dynamic config path and use default dynamic config path if it's not
+	// already set.
+	dynamicCfgPath := Viper.GetString(DynamicConfigPathKey)
+	if dynamicCfgPath == "" {
+		dynamicCfgPath = DynamicConfigFileAbsPath
+	}
+
+	// Overwrite existing nginx-agent.conf with updated config
+	updatedConfBytes, err := yaml.Marshal(config)
+	if err != nil {
+		return false, err
+	}
+
+	updatedConfBytes = append([]byte(dynamicConfigUsageComment), updatedConfBytes...)
+
+	// Mode 0644 matches how LoadPropertiesFromFile creates this file; the
+	// previous mode of 0 produced an unreadable file (0000) whenever the
+	// dynamic config did not already exist.
+	err = ioutil.WriteFile(dynamicCfgPath, updatedConfBytes, 0644)
+	if err != nil {
+		return false, err
+	}
+
+	config.Updated = filePathUTime(dynamicCfgPath)
+
+	log.Infof("Successfully updated agent config (%s)", dynamicCfgPath)
+
+	return true, nil
+}
+
+// getMetrics assembles the AgentMetrics section from the Viper registry.
+func getMetrics() AgentMetrics {
+	return AgentMetrics{
+		BulkSize:           Viper.GetInt(MetricsBulkSize),
+		ReportInterval:     Viper.GetDuration(MetricsReportInterval),
+		CollectionInterval: Viper.GetDuration(MetricsCollectionInterval),
+		Mode:               Viper.GetString(MetricsMode),
+	}
+}
+
+// getAdvancedMetrics assembles the AdvancedMetrics section (socket, periods,
+// and table-size limits) from the Viper registry.
+func getAdvancedMetrics() AdvancedMetrics {
+	return AdvancedMetrics{
+		SocketPath:        Viper.GetString(AdvancedMetricsSocketPath),
+		AggregationPeriod: Viper.GetDuration(AdvancedMetricsAggregationPeriod),
+		PublishingPeriod:  Viper.GetDuration(AdvancedMetricsPublishPeriod),
+		TableSizesLimits: advanced_metrics.TableSizesLimits{
+			StagingTableMaxSize:    Viper.GetInt(AdvancedMetricsTableSizesLimitsSTMS),
+			StagingTableThreshold:  Viper.GetInt(AdvancedMetricsTableSizesLimitsSTT),
+			PriorityTableMaxSize:   Viper.GetInt(AdvancedMetricsTableSizesLimitsPTMS),
+			PriorityTableThreshold: Viper.GetInt(AdvancedMetricsTableSizesLimitsPTT),
+		},
+	}
+}
+
+// getLog assembles the LogConfig section from the Viper registry.
+func getLog() LogConfig {
+	return LogConfig{
+		Level: Viper.GetString(LogLevel),
+		Path:  Viper.GetString(LogPath),
+	}
+}
+
+// getDataplane assembles the Dataplane status-reporting intervals from the
+// Viper registry.
+func getDataplane() Dataplane {
+	return Dataplane{
+		Status: Status{
+			PollInterval:   Viper.GetDuration(DataplaneStatusPoll),
+			ReportInterval: Viper.GetDuration(DataplaneStatusReportInterval),
+		},
+	}
+}
+
+// getNginxAppProtect assembles the NginxAppProtect section from the Viper
+// registry.
+func getNginxAppProtect() NginxAppProtect {
+	return NginxAppProtect{
+		ReportInterval:         Viper.GetDuration(NginxAppProtectReportInterval),
+		PrecompiledPublication: Viper.GetBool(NginxAppProtectPrecompiledPublication),
+	}
+}
+
+// getNAPMonitoring assembles the NAPMonitoring section from the Viper registry.
+func getNAPMonitoring() NAPMonitoring {
+	return NAPMonitoring{
+		CollectorBufferSize: Viper.GetInt(NAPMonitoringCollectorBufferSize),
+		ProcessorBufferSize: Viper.GetInt(NAPMonitoringProcessorBufferSize),
+		SyslogIP:            Viper.GetString(NAPMonitoringSyslogIP),
+		SyslogPort:          Viper.GetInt(NAPMonitoringSyslogPort),
+		ReportInterval:      Viper.GetDuration(NAPMonitoringReportInterval),
+		ReportCount:         Viper.GetInt(NAPMonitoringReportCount),
+	}
+}
+
+// getNginx assembles the Nginx section from the Viper registry.
+func getNginx() Nginx {
+	return Nginx{
+		ExcludeLogs:         Viper.GetString(NginxExcludeLogs),
+		Debug:               Viper.GetBool(NginxDebug),
+		NginxCountingSocket: Viper.GetString(NginxCountingSocket),
+		NginxClientVersion:  Viper.GetInt(NginxClientVersion),
+	}
+}
+
+// getServer assembles the Server section from the Viper registry. Note that
+// Target is not set here — GetConfig derives it from Host and GrpcPort.
+func getServer() Server {
+	return Server{
+		Host:     Viper.GetString(ServerHost),
+		GrpcPort: Viper.GetInt(ServerGrpcPort),
+		Token:    Viper.GetString(ServerToken),
+		Metrics:  Viper.GetString(ServerMetrics),
+		Command:  Viper.GetString(ServerCommand),
+	}
+}
+
+// getAgentAPI assembles the AgentAPI (REST endpoint) section from the Viper
+// registry.
+func getAgentAPI() AgentAPI {
+	return AgentAPI{
+		Port: Viper.GetInt(AgentAPIPort),
+		Cert: Viper.GetString(AgentAPICert),
+		Key:  Viper.GetString(AgentAPIKey),
+	}
+}
+
+// getTLS assembles the TLSConfig section from the Viper registry.
+func getTLS() TLSConfig {
+	return TLSConfig{
+		Enable:     Viper.GetBool(TlsEnable),
+		Cert:       Viper.GetString(TlsCert),
+		Key:        Viper.GetString(TlsPrivateKey),
+		Ca:         Viper.GetString(TlsCa),
+		SkipVerify: Viper.GetBool(TlsSkipVerify),
+	}
+}
+
+// LoadPropertiesFromFile merges the static config file cfg into Viper, then
+// merges the dynamic config on top of it (so dynamic values win), creating
+// the dynamic config file with the standard usage header if it is missing.
+func LoadPropertiesFromFile(cfg string) error {
+	Viper.SetConfigFile(cfg)
+	Viper.SetConfigType(ConfigFileType)
+	err := Viper.MergeInConfig()
+	if err != nil {
+		return fmt.Errorf("error loading config file %s: %v", cfg, err)
+	}
+
+	// Get the dynamic config path and use default dynamic config path if it's not
+	// already set.
+	dynamicCfgPath := Viper.GetString(DynamicConfigPathKey)
+	if dynamicCfgPath == "" {
+		dynamicCfgPath = DynamicConfigFileAbsPath
+	}
+	dynamicCfgDir, dynamicCfgFile := filepath.Split(dynamicCfgPath)
+
+	// Get dynamic file, if it doesn't exist create it.
+	file, err := os.Stat(dynamicCfgPath)
+	if err != nil {
+		// Not fatal: a missing file is handled by creating it below.
+		log.Warnf("Unable to read dynamic config (%s), got the following error: %v", dynamicCfgPath, err)
+	}
+
+	if file == nil {
+		log.Infof("Writing the following file to disk: %s", dynamicCfgPath)
+		err = os.MkdirAll(dynamicCfgDir, 0755)
+		if err != nil {
+			return fmt.Errorf("error attempting to create directory for dynamic config (%s), got the following error: %v", dynamicCfgDir, err)
+		}
+
+		err = os.WriteFile(dynamicCfgPath, []byte(dynamicConfigUsageComment), 0644)
+		if err != nil {
+			return fmt.Errorf("error attempting to create dynamic config (%s), got the following error: %v", dynamicCfgPath, err)
+		}
+	}
+
+	// Load properties from existing file
+	log.Debugf("Loading dynamic properties from file: %s", dynamicCfgPath)
+	Viper.AddConfigPath(dynamicCfgDir)
+	Viper.SetConfigName(dynamicCfgFile)
+	err = Viper.MergeInConfig()
+	if err != nil {
+		return fmt.Errorf("error loading file %s: %v", dynamicCfgPath, err)
+	}
+
+	return nil
+}
+
+// SetDynamicConfigFileAbsPath records the dynamic config file location in
+// Viper so later loads and writes target the same path.
+func SetDynamicConfigFileAbsPath(dynamicCfgPath string) {
+	Viper.Set(DynamicConfigPathKey, dynamicCfgPath)
+	log.Debugf("Set dynamic agent config file: %s", dynamicCfgPath)
+}
+
+// wordSepNormalizeFunc canonicalizes flag names by mapping "_" and "." to
+// "-", so underscore-, dot-, and dash-separated spellings resolve to the
+// same flag.
+func wordSepNormalizeFunc(f *flag.FlagSet, name string) flag.NormalizedName {
+	replacer := strings.NewReplacer("_", "-", ".", "-")
+	return flag.NormalizedName(replacer.Replace(name))
+}
+
+// SeekConfigFileInPaths returns the first path among searchPaths that
+// contains a file named configName, or an error if none do.
+func SeekConfigFileInPaths(configName string, searchPaths ...string) (string, error) {
+	for _, p := range searchPaths {
+		f := filepath.Join(p, configName)
+		if _, err := os.Stat(f); err == nil {
+			return f, nil
+		}
+	}
+	// Error strings should not end with punctuation (staticcheck ST1005).
+	return "", fmt.Errorf("a valid configuration has not been found in any of the search paths")
+}
+
+// filePathUTime reports the last-modified time of path, falling back to the
+// current time (with a warning) when the file cannot be stat'd.
+func filePathUTime(path string) time.Time {
+	info, err := os.Stat(path)
+	if err == nil {
+		return info.ModTime()
+	}
+	log.Warnf("Unable to determine the modified time of %s: %s. Defaulting the value to Now.", path, err)
+	return time.Now()
+}
+
+func CheckAndSetDefault(attribute interface{}, defaultValue interface{}) {
+ if value, ok := attribute.(*string); ok {
+ if *value == "" {
+ *value = defaultValue.(string)
+ }
+ } else if value, ok := attribute.(*time.Duration); ok {
+ if *value == 0*time.Second {
+ *value = defaultValue.(time.Duration)
+ }
+ } else if value, ok := attribute.(*int); ok {
+ if *value == int(0) {
+ *value = defaultValue.(int)
+ }
+ }
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/defaults.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/defaults.go
new file mode 100644
index 000000000..871c3e726
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/defaults.go
@@ -0,0 +1,469 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
+import (
+ "os"
+ "time"
+
+ agent_config "github.com/nginx/agent/sdk/v2/agent/config"
+
+ "github.com/google/uuid"
+ log "github.com/sirupsen/logrus"
+)
+
+// ConfigFilePaths returns the directories searched for the agent config
+// file: the legacy nginx-manager path, the standard nginx-agent /etc and
+// /usr/local/etc locations, plus the process's current working directory
+// (when it can be determined).
+func ConfigFilePaths() []string {
+	paths := []string{
+		"/etc/nginx-manager/",
+		"/etc/nginx-agent/",
+		// Support for BSD style file hierarchy: https://www.freebsd.org/cgi/man.cgi?hier(7)
+		// To keep them separate from the base system, user-installed applications are installed and configured under /usr/local/
+		"/usr/local/etc/nginx-agent/",
+	}
+
+	path, err := os.Getwd()
+	if err == nil {
+		paths = append(paths, path)
+	} else {
+		log.Warn("unable to determine process's current directory")
+	}
+
+	return paths
+}
+
+var (
+	// Defaults holds the built-in configuration applied before any file,
+	// environment or CLI overrides. CloudAccountID and Server.Token are
+	// regenerated as random UUIDs on every process start.
+	Defaults = &Config{
+		CloudAccountID: uuid.New().String(),
+		Log: LogConfig{
+			Level: "info",
+			Path:  "/var/log/nginx-agent",
+		},
+		Server: Server{
+			Command: "",
+			Metrics: "",
+			// token needs to be validated on the server side - can be overridden by the config value or the cli / environment variable
+			// so setting to random uuid at the moment, tls connection won't work without the auth header
+			Token: uuid.New().String(),
+		},
+		Nginx: Nginx{
+			Debug:               false,
+			NginxCountingSocket: "unix:/var/run/nginx-agent/nginx.sock",
+			NginxClientVersion:  6,
+		},
+		ConfigDirs:            "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules:/etc/nms",
+		AllowedDirectoriesMap: map[string]struct{}{},
+		TLS: TLSConfig{
+			Enable:     false,
+			SkipVerify: false,
+		},
+		Dataplane: Dataplane{
+			Status: Status{
+				PollInterval:   30 * time.Second,
+				ReportInterval: 24 * time.Hour,
+			},
+		},
+		AgentMetrics: AgentMetrics{
+			BulkSize:           20,
+			ReportInterval:     1 * time.Minute,
+			CollectionInterval: 15 * time.Second,
+			Mode:               "aggregation",
+		},
+		Features: agent_config.GetDefaultFeatures(),
+		NAPMonitoring: NAPMonitoring{
+			ProcessorBufferSize: 50000,
+			CollectorBufferSize: 50000,
+			SyslogIP:            "0.0.0.0",
+			SyslogPort:          514,
+			ReportInterval:      time.Minute,
+			ReportCount:         400,
+		},
+		NginxAppProtect: NginxAppProtect{
+			PrecompiledPublication: false,
+		},
+	}
+	// AllowedDirectoriesMap is a package-level set of directories the agent
+	// may access; populated elsewhere at config-load time.
+	AllowedDirectoriesMap map[string]struct{}
+)
+
+// Config file names and the Viper lookup keys for every setting. Nested keys
+// are composed with agent_config.KeyDelimiter so they mirror the nested
+// structure of the YAML config file.
+const (
+	DynamicConfigFileName    = "agent-dynamic.conf"
+	DynamicConfigFileAbsPath = "/etc/nginx-agent/agent-dynamic.conf"
+	ConfigFileName           = "nginx-agent.conf"
+	ConfigFileType           = "yaml"
+	EnvPrefix                = "nms"
+	ConfigPathKey            = "path"
+	DynamicConfigPathKey     = "dynamic-config-path"
+
+	CloudAccountIdKey = "cloudaccountid"
+	LocationKey       = "location"
+	DisplayNameKey    = "display_name"
+	InstanceGroupKey  = "instance_group"
+	ConfigDirsKey     = "config_dirs"
+	TagsKey           = "tags"
+
+	// viper keys used in config
+	LogKey = "log"
+
+	LogLevel = LogKey + agent_config.KeyDelimiter + "level"
+	LogPath  = LogKey + agent_config.KeyDelimiter + "path"
+
+	// viper keys used in config
+	ServerKey = "server"
+
+	ServerHost     = ServerKey + agent_config.KeyDelimiter + "host"
+	ServerGrpcPort = ServerKey + agent_config.KeyDelimiter + "grpcport"
+	ServerToken    = ServerKey + agent_config.KeyDelimiter + "token"
+	ServerMetrics  = ServerKey + agent_config.KeyDelimiter + "metrics"
+	ServerCommand  = ServerKey + agent_config.KeyDelimiter + "command"
+
+	// viper keys used in config
+	APIKey = "api"
+
+	AgentAPIPort = APIKey + agent_config.KeyDelimiter + "port"
+	AgentAPICert = APIKey + agent_config.KeyDelimiter + "cert"
+	AgentAPIKey  = APIKey + agent_config.KeyDelimiter + "key"
+
+	// viper keys used in config
+	TlsKey = "tls"
+
+	TlsEnable     = TlsKey + agent_config.KeyDelimiter + "enable"
+	TlsCert       = TlsKey + agent_config.KeyDelimiter + "cert"
+	TlsPrivateKey = TlsKey + agent_config.KeyDelimiter + "key"
+	TlsCa         = TlsKey + agent_config.KeyDelimiter + "ca"
+	TlsSkipVerify = TlsKey + agent_config.KeyDelimiter + "skip_verify"
+
+	// viper keys used in config
+	NginxKey = "nginx"
+
+	NginxExcludeLogs    = NginxKey + agent_config.KeyDelimiter + "exclude_logs"
+	NginxDebug          = NginxKey + agent_config.KeyDelimiter + "debug"
+	NginxCountingSocket = NginxKey + agent_config.KeyDelimiter + "socket"
+	NginxClientVersion  = NginxKey + agent_config.KeyDelimiter + "client_version"
+
+	// viper keys used in config
+	DataplaneKey = "dataplane"
+
+	DataplaneEventsEnable         = DataplaneKey + agent_config.KeyDelimiter + "events_enable"
+	DataplaneSyncEnable           = DataplaneKey + agent_config.KeyDelimiter + "sync_enable"
+	DataplaneStatusPoll           = DataplaneKey + agent_config.KeyDelimiter + "status_poll_interval"
+	DataplaneStatusReportInterval = DataplaneKey + agent_config.KeyDelimiter + "report_interval"
+
+	// viper keys used in config
+	MetricsKey = "metrics"
+
+	MetricsBulkSize           = MetricsKey + agent_config.KeyDelimiter + "bulk_size"
+	MetricsReportInterval     = MetricsKey + agent_config.KeyDelimiter + "report_interval"
+	MetricsCollectionInterval = MetricsKey + agent_config.KeyDelimiter + "collection_interval"
+	MetricsMode               = MetricsKey + agent_config.KeyDelimiter + "mode"
+
+	// viper keys used in config
+	AdvancedMetricsKey = "advanced_metrics"
+
+	AdvancedMetricsSocketPath           = AdvancedMetricsKey + agent_config.KeyDelimiter + "socket_path"
+	AdvancedMetricsAggregationPeriod    = AdvancedMetricsKey + agent_config.KeyDelimiter + "aggregation_period"
+	AdvancedMetricsPublishPeriod        = AdvancedMetricsKey + agent_config.KeyDelimiter + "publishing_period"
+	AdvancedMetricsTableSizesLimits     = AdvancedMetricsKey + agent_config.KeyDelimiter + "table_sizes_limits"
+	AdvancedMetricsTableSizesLimitsSTMS = AdvancedMetricsTableSizesLimits + agent_config.KeyDelimiter + "staging_table_max_size"
+	AdvancedMetricsTableSizesLimitsSTT  = AdvancedMetricsTableSizesLimits + agent_config.KeyDelimiter + "staging_table_threshold"
+	AdvancedMetricsTableSizesLimitsPTMS = AdvancedMetricsTableSizesLimits + agent_config.KeyDelimiter + "priority_table_max_size"
+	AdvancedMetricsTableSizesLimitsPTT  = AdvancedMetricsTableSizesLimits + agent_config.KeyDelimiter + "priority_table_threshold"
+
+	// viper keys used in config
+	NginxAppProtectKey = "nginx_app_protect"
+
+	NginxAppProtectReportInterval         = NginxAppProtectKey + agent_config.KeyDelimiter + "report_interval"
+	NginxAppProtectPrecompiledPublication = NginxAppProtectKey + agent_config.KeyDelimiter + "precompiled_publication"
+
+	// viper keys used in config
+	NAPMonitoringKey = "nap_monitoring"
+
+	NAPMonitoringCollectorBufferSize = NAPMonitoringKey + agent_config.KeyDelimiter + "collector_buffer_size"
+	NAPMonitoringProcessorBufferSize = NAPMonitoringKey + agent_config.KeyDelimiter + "processor_buffer_size"
+	NAPMonitoringSyslogIP            = NAPMonitoringKey + agent_config.KeyDelimiter + "syslog_ip"
+	NAPMonitoringSyslogPort          = NAPMonitoringKey + agent_config.KeyDelimiter + "syslog_port"
+	NAPMonitoringReportInterval      = NAPMonitoringKey + agent_config.KeyDelimiter + "report_interval"
+	NAPMonitoringReportCount         = NAPMonitoringKey + agent_config.KeyDelimiter + "report_count"
+
+	// DEPRECATED KEYS
+	NginxBinPathKey       = "nginx_bin_path"
+	NginxPIDPathKey       = "nginx_pid_path"
+	NginxStubStatusURLKey = "nginx_stub_status"
+	NginxPlusAPIURLKey    = "nginx_plus_api"
+	NginxMetricsPollKey   = "nginx_metrics_poll_interval"
+
+	MetricsEnableTLSKey   = "metrics_tls_enable"
+	MetricsTLSCertPathKey = "metrics_tls_cert"
+	MetricsTLSKeyPathKey  = "metrics_tls_key"
+	MetricsTLSCAPathKey   = "metrics_tls_ca"
+)
+
+var (
+	// agentFlags declares every supported CLI flag together with its usage
+	// text and (optional) default value; each entry registers itself on the
+	// pflag FlagSet via the Registrable interface.
+	agentFlags = []Registrable{
+		&StringFlag{
+			Name:         LogLevel,
+			Usage:        "The desired verbosity level for logging messages from nginx-agent. Available options, in order of severity from highest to lowest, are: panic, fatal, error, info, debug, and trace.",
+			DefaultValue: Defaults.Log.Level,
+		},
+		&StringFlag{
+			Name:         LogPath,
+			Usage:        "The path to output log messages to. If the default path doesn't exist, log messages are output to stdout/stderr.",
+			DefaultValue: Defaults.Log.Path,
+		},
+		&StringFlag{
+			Name:  ServerHost,
+			Usage: "The IP address of the server host. IPv4 addresses and hostnames are supported.",
+		},
+		&IntFlag{
+			Name:  ServerGrpcPort,
+			Usage: "The desired GRPC port to use for nginx-agent traffic.",
+		},
+		&StringFlag{
+			Name:         ServerToken,
+			Usage:        "An authentication token that grants nginx-agent access to the commander and metrics services. Auto-generated by default.",
+			DefaultValue: Defaults.Server.Token,
+		},
+		&StringFlag{
+			Name:         ServerMetrics,
+			Usage:        "The name of the metrics server sent in the tls configuration.",
+			DefaultValue: Defaults.Server.Metrics,
+		},
+		&StringFlag{
+			Name:         ServerCommand,
+			Usage:        "The name of the command server sent in the tls configuration.",
+			DefaultValue: Defaults.Server.Command,
+		},
+		// API Config
+		&IntFlag{
+			Name:  AgentAPIPort,
+			Usage: "The desired port to use for nginx-agent to expose for HTTP traffic.",
+		},
+		&StringFlag{
+			Name:         AgentAPICert,
+			Usage:        "The cert used by the Agent API.",
+			DefaultValue: "",
+		},
+		&StringFlag{
+			Name:         AgentAPIKey,
+			Usage:        "The key used by the Agent API.",
+			DefaultValue: "",
+		},
+		&StringFlag{
+			Name:         ConfigDirsKey,
+			Usage:        "Defines the paths that you want to grant nginx-agent read/write access to. This key is formatted as a string and follows Unix PATH format.",
+			DefaultValue: Defaults.ConfigDirs,
+		},
+		&StringSliceFlag{
+			Name:  TagsKey,
+			Usage: "A comma-separated list of tags to add to the current instance or machine, to be used for inventory purposes.",
+		},
+		&StringSliceFlag{
+			Name:         agent_config.FeaturesKey,
+			Usage:        "A comma-separated list of features enabled for the agent.",
+			DefaultValue: agent_config.GetDefaultFeatures(),
+		},
+		// NGINX Config
+		&StringFlag{
+			Name:  NginxExcludeLogs,
+			Usage: "One or more NGINX access log paths that you want to exclude from metrics collection. This key is formatted as a string and multiple values should be provided as a comma-separated list.",
+		},
+		&StringFlag{
+			Name:         NginxCountingSocket,
+			Usage:        "The NGINX Plus counting unix socket location.",
+			DefaultValue: Defaults.Nginx.NginxCountingSocket,
+		},
+		// Metrics
+		&DurationFlag{
+			Name:         MetricsCollectionInterval,
+			Usage:        "Sets the interval, in seconds, at which metrics are collected.",
+			DefaultValue: Defaults.AgentMetrics.CollectionInterval,
+		},
+		&StringFlag{
+			Name:         MetricsMode,
+			Usage:        "Sets the desired metrics collection mode: streaming or aggregation.",
+			DefaultValue: Defaults.AgentMetrics.Mode,
+		},
+		&IntFlag{
+			Name:         MetricsBulkSize,
+			Usage:        "The amount of metrics reports collected before sending the data back to the server.",
+			DefaultValue: Defaults.AgentMetrics.BulkSize,
+		},
+		&DurationFlag{
+			Name:         MetricsReportInterval,
+			Usage:        "The polling period specified for a single set of metrics being collected.",
+			DefaultValue: Defaults.AgentMetrics.ReportInterval,
+		},
+		// Advanced Metrics
+		&StringFlag{
+			Name:  AdvancedMetricsSocketPath,
+			Usage: "The advanced metrics socket location.",
+		},
+		// change to advanced metrics collection interval
+		&DurationFlag{
+			Name:  AdvancedMetricsAggregationPeriod,
+			Usage: "Sets the interval, in seconds, at which advanced metrics are collected.",
+		},
+		// change to advanced metrics report interval
+		&DurationFlag{
+			Name:  AdvancedMetricsPublishPeriod,
+			Usage: "The polling period specified for a single set of advanced metrics being collected.",
+		},
+		&IntFlag{
+			Name:  AdvancedMetricsTableSizesLimitsPTMS,
+			Usage: "Default Maximum Size of the Priority Table.",
+		},
+		&IntFlag{
+			Name:  AdvancedMetricsTableSizesLimitsPTT,
+			Usage: "Default Threshold of the Priority Table - normally a value which is a percentage of the corresponding Default Maximum Size of the Priority Table (<100%, but its value is not an actual percentage, i.e 88%, rather 88%*AdvancedMetricsTableSizesLimitsPTMS).",
+		},
+		&IntFlag{
+			Name:  AdvancedMetricsTableSizesLimitsSTMS,
+			Usage: "Default Maximum Size of the Staging Table.",
+		},
+		&IntFlag{
+			Name:  AdvancedMetricsTableSizesLimitsSTT,
+			Usage: "AdvancedMetricsTableSizesLimitsSTT - Default Threshold of the Staging Table - normally a value which is a percentage of the corresponding Default Maximum Size of the Staging Table (<100%, but its value is not an actual percentage, i.e 88%, rather 88%*AdvancedMetricsTableSizesLimitsSTMS).",
+		},
+		// TLS Config
+		&BoolFlag{
+			Name:         TlsEnable,
+			Usage:        "Enables TLS for secure communications.",
+			DefaultValue: Defaults.TLS.Enable,
+		},
+		&StringFlag{
+			Name:  TlsCert,
+			Usage: "The path to the certificate file to use for TLS.",
+		},
+		&StringFlag{
+			Name:  TlsPrivateKey,
+			Usage: "The path to the certificate key file to use for TLS.",
+		},
+		&StringFlag{
+			Name:  TlsCa,
+			Usage: "The path to the CA certificate file to use for TLS.",
+		},
+		&BoolFlag{
+			Name:         TlsSkipVerify,
+			Usage:        "Only intended for demonstration, sets InsecureSkipVerify for gRPC TLS credentials",
+			DefaultValue: Defaults.TLS.SkipVerify,
+		},
+		// Dataplane
+		&DurationFlag{
+			Name:         DataplaneStatusPoll,
+			Usage:        "The frequency the agent will check the dataplane for changes. Used as a \"heartbeat\" to keep the gRPC connections alive.",
+			DefaultValue: Defaults.Dataplane.Status.PollInterval,
+		},
+		&DurationFlag{
+			Name:         DataplaneStatusReportInterval,
+			Usage:        "The amount of time the agent will report on the dataplane. After this period of time it will send a snapshot of the dataplane information.",
+			DefaultValue: Defaults.Dataplane.Status.ReportInterval,
+		},
+		// Nginx App Protect
+		&DurationFlag{
+			Name:  NginxAppProtectReportInterval,
+			Usage: "The period of time the agent will check for App Protect software changes on the dataplane",
+		},
+		&BoolFlag{
+			Name:         NginxAppProtectPrecompiledPublication,
+			Usage:        "Enables publication of NGINX App Protect pre-compiled content from an external source.",
+			DefaultValue: Defaults.NginxAppProtect.PrecompiledPublication,
+		},
+		// NAP Monitoring
+		&IntFlag{
+			Name:  NAPMonitoringCollectorBufferSize,
+			Usage: "The buffer size used for the collection of events in the NGINX App Protect Monitoring extension.",
+		},
+		&IntFlag{
+			Name:  NAPMonitoringProcessorBufferSize,
+			Usage: "The buffer size used by the processing of events in the NGINX App Protect Monitoring extension.",
+		},
+		&StringFlag{
+			Name:  NAPMonitoringSyslogIP,
+			Usage: "The Syslog IP address the NGINX Agent would run on. This IP address would be used in the NGINX App Protect config to send logging events.",
+		},
+		&IntFlag{
+			Name:  NAPMonitoringSyslogPort,
+			Usage: "The Syslog port the NGINX Agent would run on. This port would be used in the NGINX App Protect config to send logging events.",
+		},
+		// Other Config
+		&StringFlag{
+			Name:  DisplayNameKey,
+			Usage: "The instance's 'name' value.",
+		},
+		&StringFlag{
+			Name:  InstanceGroupKey,
+			Usage: "The instance's 'group' value.",
+		},
+	}
+	// deprecatedFlags are still registered so that old command lines continue
+	// to parse; their usage strings point at replacements where one exists.
+	deprecatedFlags = []Registrable{
+		&StringFlag{
+			Name:  "metadata",
+			Usage: "DEPRECATED; use --server-host instead.",
+		},
+		&StringFlag{
+			Name:  ServerKey,
+			Usage: "DEPRECATED; use --server-grpcport instead.",
+		},
+		&StringFlag{
+			Name:  "metrics_server",
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  "api_token",
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  DataplaneSyncEnable,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  DataplaneEventsEnable,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  LocationKey,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		// NGINX Config
+		&StringFlag{
+			Name:  NginxBinPathKey,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  NginxPIDPathKey,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  NginxStubStatusURLKey,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&StringFlag{
+			Name:  NginxPlusAPIURLKey,
+			Usage: "DEPRECATED; no replacement due to change in functionality.",
+		},
+		&DurationFlag{
+			Name:  NginxMetricsPollKey,
+			Usage: "DEPRECATED; use --metrics-collection-interval instead.",
+		},
+		// Metrics TLS Config
+		&BoolFlag{
+			Name:  MetricsEnableTLSKey,
+			Usage: "DEPRECATED; use --tls-enable instead.",
+		},
+		&StringFlag{
+			Name:  MetricsTLSCertPathKey,
+			Usage: "DEPRECATED; use --tls-cert instead.",
+		},
+		&StringFlag{
+			Name:  MetricsTLSKeyPathKey,
+			Usage: "DEPRECATED; use --tls-key instead.",
+		},
+		&StringFlag{
+			Name:  MetricsTLSCAPathKey,
+			Usage: "DEPRECATED; use --tls-ca instead.",
+		},
+	}
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/flags.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/flags.go
new file mode 100644
index 000000000..a07c363ab
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/flags.go
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
+import (
+ "time"
+
+ flag "github.com/spf13/pflag"
+)
+
+// Registrable is implemented by flag descriptors that can register
+// themselves (name, default, usage) on a pflag.FlagSet.
+type Registrable interface {
+	register(*flag.FlagSet)
+}
+
+// StringFlag describes a string-valued CLI flag.
+type StringFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue string
+}
+
+// StringSliceFlag describes a flag accepting a comma-separated list of strings.
+type StringSliceFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue []string
+}
+
+// StringMapFlag describes a flag accepting key=value string pairs.
+type StringMapFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue map[string]string
+}
+
+// IntFlag describes an integer-valued CLI flag.
+type IntFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue int
+}
+
+// BoolFlag describes a boolean CLI flag.
+type BoolFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue bool
+}
+
+// DurationFlag describes a time.Duration-valued CLI flag.
+type DurationFlag struct {
+	Name         string
+	Usage        string
+	DefaultValue time.Duration
+}
+
+// register adds the string flag to fs with its default and usage text.
+func (f *StringFlag) register(fs *flag.FlagSet) {
+	fs.String(f.Name, f.DefaultValue, f.Usage)
+}
+
+// register adds the string-slice flag to fs.
+func (f *StringSliceFlag) register(fs *flag.FlagSet) {
+	fs.StringSlice(f.Name, f.DefaultValue, f.Usage)
+}
+
+// register adds the string-to-string map flag to fs.
+func (f *StringMapFlag) register(fs *flag.FlagSet) {
+	fs.StringToString(f.Name, f.DefaultValue, f.Usage)
+}
+
+// register adds the int flag to fs.
+func (f *IntFlag) register(fs *flag.FlagSet) {
+	fs.Int(f.Name, f.DefaultValue, f.Usage)
+}
+
+// register adds the bool flag to fs.
+func (f *BoolFlag) register(fs *flag.FlagSet) {
+	fs.Bool(f.Name, f.DefaultValue, f.Usage)
+}
+
+// register adds the duration flag to fs.
+func (f *DurationFlag) register(fs *flag.FlagSet) {
+	fs.Duration(f.Name, f.DefaultValue, f.Usage)
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/types.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/types.go
new file mode 100644
index 000000000..4fc97cb2e
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/config/types.go
@@ -0,0 +1,138 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package config
+
+import (
+ "time"
+
+ advanced_metrics "github.com/nginx/agent/v2/src/extensions/advanced-metrics/pkg/advanced-metrics"
+)
+
+// Config is the root agent configuration. mapstructure tags drive decoding
+// from Viper (file/env/flags); yaml tags control which fields are written
+// back to the dynamic config file ("-" fields are never persisted).
+type Config struct {
+	Path                  string            `yaml:"-"`
+	DynamicConfigPath     string            `yaml:"-"`
+	ClientID              string            `mapstructure:"agent_id" yaml:"-"`
+	CloudAccountID        string            `mapstructure:"cloud_account" yaml:"-"`
+	Server                Server            `mapstructure:"server" yaml:"-"`
+	AgentAPI              AgentAPI          `mapstructure:"api" yaml:"-"`
+	ConfigDirs            string            `mapstructure:"config-dirs" yaml:"-"`
+	Log                   LogConfig         `mapstructure:"log" yaml:"-"`
+	TLS                   TLSConfig         `mapstructure:"tls" yaml:"-"`
+	Nginx                 Nginx             `mapstructure:"nginx" yaml:"-"`
+	Dataplane             Dataplane         `mapstructure:"dataplane" yaml:"-"`
+	AgentMetrics          AgentMetrics      `mapstructure:"metrics" yaml:"-"`
+	Tags                  []string          `mapstructure:"tags" yaml:"tags,omitempty"`
+	Features              []string          `mapstructure:"features" yaml:"features,omitempty"`
+	Extensions            []string          `mapstructure:"extensions" yaml:"extensions,omitempty"`
+	Updated               time.Time         `yaml:"-"` // update time of the config file
+	AllowedDirectoriesMap map[string]struct{} `yaml:"-"`
+	DisplayName           string            `mapstructure:"display_name" yaml:"display_name,omitempty"`
+	InstanceGroup         string            `mapstructure:"instance_group" yaml:"instance_group,omitempty"`
+	AdvancedMetrics       AdvancedMetrics   `mapstructure:"advanced_metrics" yaml:"advanced_metrics,omitempty"`
+	NginxAppProtect       NginxAppProtect   `mapstructure:"nginx_app_protect" yaml:"nginx_app_protect,omitempty"`
+	NAPMonitoring         NAPMonitoring     `mapstructure:"nap_monitoring" yaml:"nap_monitoring,omitempty"`
+}
+
+// IsGrpcServerConfigured reports whether both a server host and a non-zero
+// gRPC port have been configured.
+func (c *Config) IsGrpcServerConfigured() bool {
+	return c.Server.Host != "" && c.Server.GrpcPort != 0
+}
+
+// IsNginxAppProtectConfigured reports whether any NginxAppProtect setting
+// differs from the zero value.
+func (c *Config) IsNginxAppProtectConfigured() bool {
+	return c.NginxAppProtect != (NginxAppProtect{})
+}
+
+// IsNginxAppProtectPrecompiledPublicationConfigured reports whether
+// precompiled publication of App Protect content is enabled.
+func (c *Config) IsNginxAppProtectPrecompiledPublicationConfigured() bool {
+	return c.NginxAppProtect.PrecompiledPublication
+}
+
+// IsFeatureEnabled reports whether feature appears in c.Features.
+func (c *Config) IsFeatureEnabled(feature string) bool {
+	for _, configFeature := range c.Features {
+		if configFeature == feature {
+			return true
+		}
+	}
+	return false
+}
+
+// Server holds the gRPC command/metrics server connection settings.
+type Server struct {
+	Host     string `mapstructure:"host" yaml:"-"`
+	GrpcPort int    `mapstructure:"grpcPort" yaml:"-"`
+	Token    string `mapstructure:"token" yaml:"-"`
+	Metrics  string `mapstructure:"metrics" yaml:"-"`
+	Command  string `mapstructure:"command" yaml:"-"`
+	// This is internal and shouldnt be exposed as a flag
+	Target string `mapstructure:"target" yaml:"-"`
+}
+
+// AgentAPI configures the agent's local HTTP(S) API endpoint.
+type AgentAPI struct {
+	Port int    `mapstructure:"port" yaml:"-"`
+	Cert string `mapstructure:"cert" yaml:"-"`
+	Key  string `mapstructure:"key" yaml:"-"`
+}
+
+// LogConfig for logging
+type LogConfig struct {
+	Level string `mapstructure:"level" yaml:"-"`
+	Path  string `mapstructure:"path" yaml:"-"`
+}
+
+// TLSConfig for securing communications
+type TLSConfig struct {
+	Enable     bool   `mapstructure:"enable" yaml:"-"`
+	Cert       string `mapstructure:"cert" yaml:"-"`
+	Key        string `mapstructure:"key" yaml:"-"`
+	Ca         string `mapstructure:"ca" yaml:"-"`
+	SkipVerify bool   `mapstructure:"skip_verify" yaml:"-"`
+}
+
+// Nginx settings
+type Nginx struct {
+	ExcludeLogs         string `mapstructure:"exclude_logs" yaml:"-"`
+	Debug               bool   `mapstructure:"debug" yaml:"-"`
+	NginxCountingSocket string `mapstructure:"socket" yaml:"-"`
+	NginxClientVersion  int    `mapstructure:"client_version" yaml:"-"`
+}
+
+// Dataplane groups dataplane status reporting settings.
+type Dataplane struct {
+	Status Status `mapstructure:"status" yaml:"-"`
+}
+
+// Status polling for heartbeat settings
+type Status struct {
+	PollInterval   time.Duration `mapstructure:"poll_interval" yaml:"-"`
+	ReportInterval time.Duration `mapstructure:"report_interval" yaml:"-"`
+}
+
+// AgentMetrics for system/agent metrics
+type AgentMetrics struct {
+	BulkSize           int           `mapstructure:"bulk_size" yaml:"-"`
+	ReportInterval     time.Duration `mapstructure:"report_interval" yaml:"-"`
+	CollectionInterval time.Duration `mapstructure:"collection_interval" yaml:"-"`
+	Mode               string        `mapstructure:"mode" yaml:"-"`
+}
+
+// AdvancedMetrics configures the advanced-metrics extension.
+type AdvancedMetrics struct {
+	SocketPath        string                            `mapstructure:"socket_path" yaml:"-"`
+	AggregationPeriod time.Duration                     `mapstructure:"aggregation_period" yaml:"-"`
+	PublishingPeriod  time.Duration                     `mapstructure:"publishing_period" yaml:"-"`
+	TableSizesLimits  advanced_metrics.TableSizesLimits `mapstructure:"table_sizes_limits" yaml:"-"`
+}
+
+// NginxAppProtect configures NGINX App Protect integration.
+type NginxAppProtect struct {
+	ReportInterval         time.Duration `mapstructure:"report_interval" yaml:"-"`
+	PrecompiledPublication bool          `mapstructure:"precompiled_publication" yaml:"-"`
+}
+
+// NAPMonitoring configures the NGINX App Protect Monitoring extension.
+type NAPMonitoring struct {
+	CollectorBufferSize int           `mapstructure:"collector_buffer_size" yaml:"-"`
+	ProcessorBufferSize int           `mapstructure:"processor_buffer_size" yaml:"-"`
+	SyslogIP            string        `mapstructure:"syslog_ip" yaml:"-"`
+	SyslogPort          int           `mapstructure:"syslog_port" yaml:"-"`
+	ReportInterval      time.Duration `mapstructure:"report_interval" yaml:"-"`
+	ReportCount         int           `mapstructure:"report_count" yaml:"-"`
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/environment.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/environment.go
new file mode 100644
index 000000000..375e60384
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/environment.go
@@ -0,0 +1,621 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/google/uuid"
+ "github.com/klauspost/cpuid/v2"
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/disk"
+ "github.com/shirou/gopsutil/v3/host"
+ "github.com/shirou/gopsutil/v3/process"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/nginx/agent/sdk/v2/files"
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/nginx/agent/v2/src/core/network"
+)
+
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+//counterfeiter:generate -o fake_environment_test.go . Environment
+//go:generate sh -c "grep -v agent/product/nginx-agent/v2/core fake_environment_test.go | sed -e s\\/core\\\\.\\/\\/g > fake_environment_fixed.go"
+//go:generate mv fake_environment_fixed.go fake_environment_test.go
+// Environment abstracts inspection of the host the agent runs on — host
+// info, processes, disks, container detection — and writing config files.
+type Environment interface {
+	NewHostInfo(agentVersion string, tags *[]string, configDirs string, clearCache bool) *proto.HostInfo
+	GetHostname() (hostname string)
+	GetSystemUUID() (hostId string)
+	ReadDirectory(dir string, ext string) ([]string, error)
+	WriteFiles(backup ConfigApplyMarker, files []*proto.File, prefix string, allowedDirs map[string]struct{}) error
+	Processes() (result []Process)
+	FileStat(path string) (os.FileInfo, error)
+	DiskDevices() ([]string, error)
+	GetContainerID() (string, error)
+	GetNetOverflow() (float64, error)
+	IsContainer() bool
+}
+
+// ConfigApplyMarker records files touched while applying a config
+// (presumably so a failed apply can be rolled back — see callers).
+type ConfigApplyMarker interface {
+	MarkAndSave(string) error
+	RemoveFromNotExists(string)
+}
+
+// EnvironmentType is the concrete Environment implementation; host caches
+// the HostInfo built by NewHostInfo across calls.
+type EnvironmentType struct {
+	host *proto.HostInfo
+}
+
+// Process describes a single OS process observed on the dataplane host.
+type Process struct {
+	Pid        int32
+	Name       string
+	CreateTime int64
+	Status     string
+	IsRunning  bool
+	IsMaster   bool
+	Path       string
+	User       string
+	ParentPid  int32
+	Command    string
+}
+
+// lengthOfContainerId is the length of a full (unabbreviated) hex container ID.
+const lengthOfContainerId = 64
+
+var (
+	// virtualizationFunc indirects host.Virtualization (presumably so tests
+	// can stub it — confirm against test files).
+	virtualizationFunc             = host.Virtualization
+	// Compile-time assertion that EnvironmentType satisfies Environment.
+	_                  Environment = &EnvironmentType{}
+)
+
+// NewHostInfo builds (and caches on env.host) a proto.HostInfo snapshot of
+// the dataplane host. The cached value is reused until clearCache is true.
+// On failure to read host information an empty HostInfo is returned without
+// being cached.
+func (env *EnvironmentType) NewHostInfo(agentVersion string, tags *[]string, configDirs string, clearCache bool) *proto.HostInfo {
+	// temp cache measure
+	if env.host == nil || clearCache {
+		hostInformation, err := host.Info()
+		if err != nil {
+			log.Warnf("Unable to collect dataplane host information: %v, defaulting value", err)
+			return &proto.HostInfo{}
+		}
+
+		// NOTE(review): "hostInfoFacacde" is a misspelling of "facade"; kept
+		// to match the upstream vendored source.
+		hostInfoFacacde := &proto.HostInfo{
+			Agent:       agentVersion,
+			Boot:        hostInformation.BootTime,
+			Hostname:    hostInformation.Hostname,
+			DisplayName: hostInformation.Hostname,
+			OsType:      hostInformation.OS,
+			Uuid:        env.GetSystemUUID(),
+			Uname:       hostInformation.KernelArch,
+			Partitons:   diskPartitions(),
+			Network:     env.networks(),
+			Processor:   processors(),
+			Release:     releaseInfo(),
+			Tags:        *tags,
+			AgentAccessibleDirs: configDirs,
+		}
+
+		log.Tracef("HostInfo created: %v", hostInfoFacacde)
+		env.host = hostInfoFacacde
+	}
+	return env.host
+}
+
+// GetHostname returns the dataplane hostname, or "" (with a warning) when
+// host information cannot be read.
+func (env *EnvironmentType) GetHostname() string {
+	hostInformation, err := host.Info()
+	if err != nil {
+		log.Warnf("Unable to read hostname from dataplane, defaulting value. Error: %v", err)
+		return ""
+	}
+	return hostInformation.Hostname
+}
+
+// GetSystemUUID returns a stable identifier for this host as a name-based
+// (MD5) UUID: derived from the container ID when running in a container,
+// otherwise from the host ID. Returns "" when neither can be read.
+func (env *EnvironmentType) GetSystemUUID() string {
+	if env.IsContainer() {
+		containerID, err := env.GetContainerID()
+		if err != nil {
+			log.Errorf("Unable to read docker container ID: %v", err)
+			return ""
+		}
+		return uuid.NewMD5(uuid.NameSpaceDNS, []byte(containerID)).String()
+	}
+
+	hostInfo, err := host.Info()
+	if err != nil {
+		log.Infof("Unable to read host id from dataplane, defaulting value. Error: %v", err)
+		return ""
+	}
+	return uuid.NewMD5(uuid.Nil, []byte(hostInfo.HostID)).String()
+}
+
+// ReadDirectory lists the entries of dir with ext removed from each name.
+// NOTE(review): strings.Replace removes every occurrence of ext anywhere in
+// the name, not only a trailing extension (strings.TrimSuffix would be the
+// safer intent); the log message grammar ("Unable to reading") is also off.
+// Both kept to match the upstream vendored source.
+func (env *EnvironmentType) ReadDirectory(dir string, ext string) ([]string, error) {
+	var files []string
+	fileInfo, err := ioutil.ReadDir(dir)
+	if err != nil {
+		log.Warnf("Unable to reading directory %s: %v ", dir, err)
+		return files, err
+	}
+
+	for _, file := range fileInfo {
+		files = append(files, strings.Replace(file.Name(), ext, "", -1))
+	}
+
+	return files, nil
+}
+
+// WriteFiles validates that every file lies within allowedDirs and then
+// writes each one under confPath, marking it on backup so the apply can be
+// rolled back. The first validation or write error aborts the whole batch.
+func (env *EnvironmentType) WriteFiles(backup ConfigApplyMarker, files []*proto.File, confPath string, allowedDirs map[string]struct{}) error {
+	err := allowedFiles(files, allowedDirs)
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		if err = writeFile(backup, file, confPath); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// IsContainer reports whether the agent is running inside a container, by
+// probing well-known marker files (docker, podman, kubernetes service
+// account) and falling back to a cgroup v1 scan of /proc/self/cgroup.
+func (env *EnvironmentType) IsContainer() bool {
+	const (
+		dockerEnv      = "/.dockerenv"
+		containerEnv   = "/run/.containerenv"
+		selfCgroup     = "/proc/self/cgroup"
+		k8sServiceAcct = "/var/run/secrets/kubernetes.io/serviceaccount"
+	)
+
+	for _, filename := range []string{dockerEnv, containerEnv, k8sServiceAcct} {
+		if _, err := os.Stat(filename); err == nil {
+			log.Debugf("is a container because (%s) exists", filename)
+			return true
+		}
+	}
+	// v1 check
+	if result, err := cGroupV1Check(selfCgroup); err == nil && result {
+		return result
+	}
+
+	return false
+}
+
+// cGroupV1Check returns true when any line of cgroupFile mentions a known
+// container runtime marker (kubepods, docker, containerd); otherwise it
+// returns false with an error.
+// NOTE(review): the constant name "conatinerd" is a typo for "containerd"
+// (the value is correct); kept to match the upstream vendored source.
+func cGroupV1Check(cgroupFile string) (bool, error) {
+	const (
+		k8sKind    = "kubepods"
+		docker     = "docker"
+		conatinerd = "containerd"
+	)
+
+	data, err := ioutil.ReadFile(cgroupFile)
+	if err != nil {
+		return false, err
+	}
+	scanner := bufio.NewScanner(strings.NewReader(string(data)))
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.Contains(line, k8sKind) || strings.Contains(line, docker) || strings.Contains(line, conatinerd) {
+			return true, nil
+		}
+	}
+	return false, errors.New("cGroup v1 information not found")
+}
+
+// GetContainerID returns the ID of the current environment if running in a container.
+// It errors out when not running in a container, or when the ID cannot be
+// extracted from /proc/self/mountinfo.
+func (env *EnvironmentType) GetContainerID() (string, error) {
+	const mountInfo = "/proc/self/mountinfo"
+
+	if !env.IsContainer() {
+		return "", errors.New("not in docker")
+	}
+
+	containerID, err := getContainerID(mountInfo)
+	if err != nil {
+		return "", fmt.Errorf("could not get container ID: %v", err)
+	}
+
+	log.Debugf("Container ID: %s", containerID)
+
+	return containerID, err
+}
+
+// getContainerID returns the container ID of the current running environment.
+// Supports cgroup v1 and v2. Reading "/proc/1/cpuset" would only work for cgroups v1
+// mountInfo is the path: "/proc/self/mountinfo"
+// Each mountinfo line is split on spaces and every token is matched against
+// a set of patterns covering the layouts used by docker, podman, systemd
+// scopes and containerd sandboxes; the first 64-char hex match wins.
+func getContainerID(mountInfo string) (string, error) {
+	mInfoFile, err := os.Open(mountInfo)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %v", mountInfo, err)
+	}
+
+	fileScanner := bufio.NewScanner(mInfoFile)
+	fileScanner.Split(bufio.ScanLines)
+
+	var lines []string
+	for fileScanner.Scan() {
+		lines = append(lines, fileScanner.Text())
+	}
+	mInfoFile.Close()
+
+	basePattern := regexp.MustCompile("/([a-f0-9]{64})$")
+	colonPattern := regexp.MustCompile(":([a-f0-9]{64})$")
+	scopePattern := regexp.MustCompile(`/.+-(.+?).scope$`)
+	containersPattern := regexp.MustCompile("containers/([a-f0-9]{64})")
+	containerdPattern := regexp.MustCompile("sandboxes/([a-f0-9]{64})")
+
+	for _, line := range lines {
+		splitLine := strings.Split(line, " ")
+		for _, word := range splitLine {
+			slices := scopePattern.FindStringSubmatch(word)
+			if len(slices) >= 2 && len(slices[1]) == lengthOfContainerId {
+				return slices[1], nil
+			}
+
+			slices = basePattern.FindStringSubmatch(word)
+			if len(slices) >= 2 && len(slices[1]) == lengthOfContainerId {
+				return slices[1], nil
+			}
+
+			slices = colonPattern.FindStringSubmatch(word)
+			if len(slices) >= 2 && len(slices[1]) == lengthOfContainerId {
+				return slices[1], nil
+			}
+
+			slices = containersPattern.FindStringSubmatch(word)
+			if len(slices) >= 2 && len(slices[1]) == lengthOfContainerId {
+				return slices[1], nil
+			}
+
+			slices = containerdPattern.FindStringSubmatch(word)
+			if len(slices) >= 2 && len(slices[1]) == lengthOfContainerId {
+				return slices[1], nil
+			}
+		}
+	}
+
+	return "", errors.New("no container ID found")
+}
+
+// DiskDevices returns a list of Disk Devices known by the system.
+// Loop and other virtual devices are filtered out
+func (env *EnvironmentType) DiskDevices() ([]string, error) {
+	switch runtime.GOOS {
+	case "freebsd":
+		return env.getFreeBSDDiskDevices()
+	case "darwin":
+		return []string{}, errors.New("darwin architecture is not supported")
+	default:
+		// Any other OS is assumed to expose /sys/block (i.e. Linux).
+		return getLinuxDiskDevices()
+	}
+}
+
+// GetNetOverflow reports how many times a socket listen queue overflowed,
+// delegating to the platform-specific helper in the network package.
+func (env *EnvironmentType) GetNetOverflow() (float64, error) {
+	return network.GetNetOverflow()
+}
+
+// getLinuxDiskDevices lists block device names under /sys/block, skipping
+// loop devices and virtual devices (RAM disks, device-mapper, etc.).
+func getLinuxDiskDevices() ([]string, error) {
+	const (
+		SysBlockDir       = "/sys/block"
+		LoopDeviceMark    = "/loop"
+		VirtualDeviceMark = "/virtual"
+	)
+	dd := []string{}
+	log.Debugf("Reading directory for linux disk information: %s", SysBlockDir)
+
+	dir, err := ioutil.ReadDir(SysBlockDir)
+	if err != nil {
+		return dd, err
+	}
+
+	for _, f := range dir {
+		// Each entry is a symlink into the device tree; the link target
+		// reveals whether the device is a loop or virtual device.
+		rl, e := os.Readlink(filepath.Join(SysBlockDir, f.Name()))
+		if e != nil {
+			continue
+		}
+		if strings.Contains(rl, LoopDeviceMark) || strings.Contains(rl, VirtualDeviceMark) {
+			continue
+		}
+		dd = append(dd, f.Name())
+	}
+
+	return dd, nil
+}
+
+// getFreeBSDDiskDevices lists disk devices by parsing `geom disk list`
+// output, taking the device name from each "Geom name:" line. The geom
+// binary is validated (root-owned, no setuid/setgid) before execution.
+func (env *EnvironmentType) getFreeBSDDiskDevices() ([]string, error) {
+	devices := []string{}
+
+	geomBin, secErr := env.checkUtil("geom")
+	if secErr != nil {
+		return devices, secErr
+	}
+
+	outbuf, err := runCmd(geomBin, "disk", "list")
+	if err != nil {
+		return devices, errors.New("unable to obtain disk list")
+	}
+
+	for _, line := range strings.Split(outbuf.String(), "\n") {
+		if !strings.HasPrefix(line, "Geom name:") {
+			continue
+		}
+		// The device name is the last whitespace-separated field.
+		geomFields := strings.Fields(line)
+		devices = append(devices, geomFields[len(geomFields)-1])
+	}
+
+	return devices, nil
+}
+
+// allowedFiles verifies that every file in the list may be written, i.e.
+// each one passes allowedFile. Returns an error naming the first offender.
+func allowedFiles(files []*proto.File, allowedDirs map[string]struct{}) error {
+	for _, file := range files {
+		path := file.GetName()
+		if !allowedFile(path, allowedDirs) {
+			return fmt.Errorf("write prohibited for: %s", path)
+		}
+	}
+	return nil
+}
+
+// allowedFile reports whether path may be written: relative paths are always
+// allowed (they resolve under the config dir), absolute paths must fall
+// under one of the allowed directories.
+func allowedFile(path string, allowedDirs map[string]struct{}) bool {
+	if !filepath.IsAbs(path) {
+		// if not absolute path, we'll put it at the relative to config dir for the binary
+		return true
+	}
+	for dir := range allowedDirs {
+		// NOTE: plain prefix match — "/etc/nginx" also matches
+		// "/etc/nginx-evil"; assumes allowed dirs end with a separator.
+		if strings.HasPrefix(path, dir) {
+			return true
+		}
+	}
+	return false
+}
+
+// writeFile writes the provided file content to disk. If the file.GetName() returns an absolute path, it'll be written
+// to the path. Otherwise, it'll be written to the path relative to the provided confPath.
+// The file is first recorded in the backup so a failed apply can be rolled back.
+func writeFile(backup ConfigApplyMarker, file *proto.File, confPath string) error {
+	fileFullPath := file.GetName()
+	if !filepath.IsAbs(fileFullPath) {
+		fileFullPath = filepath.Join(confPath, fileFullPath)
+	}
+
+	// Record the pre-apply state (content or absence) for rollback.
+	if err := backup.MarkAndSave(fileFullPath); err != nil {
+		return err
+	}
+	permissions := files.GetFileMode(file.GetPermissions())
+
+	// Create missing parent directories before writing.
+	directory := filepath.Dir(fileFullPath)
+	_, err := os.Stat(directory)
+	if os.IsNotExist(err) {
+		log.Debugf("Creating directory %s with permissions 755", directory)
+		err = os.MkdirAll(directory, 0755)
+		if err != nil {
+			return err
+		}
+	}
+
+	if err := ioutil.WriteFile(fileFullPath, file.GetContents(), permissions); err != nil {
+		// If the file didn't exist originally and failed to be created
+		// Then remove that file from the backup so that the rollback doesn't try to delete the file
+		// NOTE(review): the condition below fires when Stat says the file
+		// DOES exist, which looks inverted relative to the comment above —
+		// confirm the intended rollback semantics with ConfigApplyMarker.
+		if _, err := os.Stat(fileFullPath); !errors.Is(err, os.ErrNotExist) {
+			backup.RemoveFromNotExists(fileFullPath)
+		}
+		return err
+	}
+
+	log.Debugf("Wrote file %s", fileFullPath)
+	return nil
+}
+
+// FileStat returns os.Stat for path; thin wrapper to satisfy Environment.
+func (env *EnvironmentType) FileStat(path string) (os.FileInfo, error) {
+	// TODO: check if allowed list
+	return os.Stat(path)
+}
+
+// Processes returns a slice of nginx master and nginx worker processes currently running
+func (env *EnvironmentType) Processes() (result []Process) {
+	var processList []Process
+
+	pids, err := process.Pids()
+	if err != nil {
+		log.Errorf("failed to read pids for dataplane host: %v", err)
+		return processList
+	}
+
+	// Track every nginx pid seen so parentage can classify master/worker.
+	seenPids := make(map[int32]bool)
+	for _, pid := range pids {
+		// Per-attribute errors are deliberately ignored: processes can
+		// disappear mid-scan, and a best-effort snapshot is acceptable.
+		p, _ := process.NewProcess(pid)
+		name, _ := p.Name()
+
+		if name == "nginx" {
+			createTime, _ := p.CreateTime()
+			status, _ := p.Status()
+			running, _ := p.IsRunning()
+			user, _ := p.Username()
+			ppid, _ := p.Ppid()
+			cmd, _ := p.Cmdline()
+			exe, _ := p.Exe()
+
+			// if the exe is empty, try get the exe from the parent
+			if exe == "" {
+				parentProcess, _ := process.NewProcess(ppid)
+				exe, _ = parentProcess.Exe()
+			}
+
+			processList = append(processList, Process{
+				Pid:        pid,
+				Name:       name,
+				CreateTime: createTime, // Running time is probably different
+				Status:     strings.Join(status, " "),
+				IsRunning:  running,
+				Path:       exe,
+				User:       user,
+				ParentPid:  ppid,
+				Command:    cmd,
+			})
+			seenPids[pid] = true
+		}
+	}
+
+	// A process whose parent is NOT another nginx process is a master;
+	// workers are children of a master we have already seen.
+	for i := 0; i < len(processList); i++ {
+		item := &processList[i]
+		if seenPids[item.ParentPid] {
+			item.IsMaster = false
+		} else {
+			item.IsMaster = true
+		}
+	}
+
+	return processList
+}
+
+// processors builds one proto.CpuInfo per logical CPU reported by gopsutil,
+// enriched with virtualization info and cpuid-derived cache details.
+// Returns an empty slice (never nil error) when CPU info is unavailable.
+func processors() (res []*proto.CpuInfo) {
+	log.Debug("Reading CPU information for dataplane host")
+	cpus, err := cpu.Info()
+	if err != nil {
+		log.Warnf("%v", err)
+		return []*proto.CpuInfo{}
+	}
+
+	hypervisor, virtual := virtualization()
+
+	for _, item := range cpus {
+		processor := proto.CpuInfo{
+			// TODO: Model is a number
+			// wait to see if unmarshalling error on control plane side is fixed with switch in models
+			// https://stackoverflow.com/questions/21151765/cannot-unmarshal-string-into-go-value-of-type-int64
+			Model:        item.Model,
+			Cores:        item.Cores,
+			Architecture: item.Family,
+			Cpus:         int32(len(cpus)),
+			Mhz:          item.Mhz,
+			// TODO - check if this is correct
+			Hypervisor:     hypervisor,
+			Virtualization: virtual,
+			Cache:          processorCache(item),
+		}
+
+		res = append(res, &processor)
+	}
+
+	return res
+}
+
+// processorCache reports the host CPU's cache sizes (in bytes), feature set
+// and cache-line width, keyed for the CpuInfo.Cache map.
+// Find a library that supports multiple CPUs — cpuid only reports the CPU
+// the current goroutine happens to run on.
+func processorCache(item cpu.InfoStat) map[string]string {
+	cache := map[string]string{
+		// values are in bytes
+		"L1d": fmt.Sprintf("%v", cpuid.CPU.Cache.L1D),
+		// BUG FIX: previously read L1D here too, reporting the data cache
+		// size for the instruction cache.
+		"L1i": fmt.Sprintf("%v", cpuid.CPU.Cache.L1I),
+		"L2":  fmt.Sprintf("%v", cpuid.CPU.Cache.L2),
+		"L3":  fmt.Sprintf("%v", cpuid.CPU.Cache.L3),
+		"Features:": strings.Join(cpuid.CPU.FeatureSet(), ","),
+		// "Flags:": strings.Join(item.Flags, ","),
+		"Cacheline bytes:": fmt.Sprintf("%v", cpuid.CPU.CacheLine),
+	}
+
+	if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
+		cache["SIMD 2:"] = "Streaming SIMD 2 Extensions"
+	}
+	return cache
+}
+
+// virtualization returns (system, role) for the host, e.g. ("kvm", "guest").
+// Docker is normalized to the system name "container". On error it assumes
+// a bare-metal host.
+func virtualization() (string, string) {
+	// doesn't check k8s
+	virtualizationSystem, virtualizationRole, err := virtualizationFunc()
+	if err != nil {
+		log.Warnf("Error reading virtualization: %v", err)
+		return "", "host"
+	}
+
+	if virtualizationSystem == "docker" {
+		return "container", virtualizationRole
+	}
+	return virtualizationSystem, virtualizationRole
+}
+
+// diskPartitions returns the host's physical disk partitions (mount point,
+// device, filesystem type). On error an empty slice is returned.
+func diskPartitions() (partitions []*proto.DiskPartition) {
+	// false => physical devices only, not all filesystems.
+	parts, err := disk.Partitions(false)
+	if err != nil {
+		// return an array of 0
+		log.Errorf("Could not read disk partitions for host: %v", err)
+		return []*proto.DiskPartition{}
+	}
+	for _, part := range parts {
+		pm := proto.DiskPartition{
+			MountPoint: part.Mountpoint,
+			Device:     part.Device,
+			FsType:     part.Fstype,
+		}
+		partitions = append(partitions, &pm)
+	}
+	return partitions
+}
+
+// releaseInfo maps gopsutil host info onto proto.ReleaseInfo.
+// NOTE(review): the field mapping (Version<-KernelVersion, Codename<-OS,
+// Name<-PlatformFamily) looks unusual — presumably matched to control-plane
+// expectations; confirm before changing.
+func releaseInfo() (release *proto.ReleaseInfo) {
+	hostInfo, err := host.Info()
+	if err != nil {
+		log.Errorf("Could not read release information for host: %v", err)
+		return &proto.ReleaseInfo{}
+	}
+
+	return &proto.ReleaseInfo{
+		VersionId: hostInfo.PlatformVersion,
+		Version:   hostInfo.KernelVersion,
+		Codename:  hostInfo.OS,
+		Name:      hostInfo.PlatformFamily,
+		Id:        hostInfo.Platform,
+	}
+}
+
+// networks returns the dataplane's network interfaces and default interface.
+func (env *EnvironmentType) networks() (res *proto.Network) {
+	return network.GetDataplaneNetworks()
+}
+
+// checkUtil resolves util on $PATH and vets the binary before it is run:
+// it must be root-owned and must not carry setuid/setgid bits. Returns the
+// absolute path on success.
+func (env *EnvironmentType) checkUtil(util string) (string, error) {
+	log.Infof("Trying to exec the following utility: %s", util)
+	path, err := exec.LookPath(util)
+	if err != nil {
+		return "", err
+	}
+
+	info, err := env.FileStat(path)
+	if err != nil {
+		return "", err
+	}
+
+	// Sys() is *syscall.Stat_t on unix; ownership cannot be checked elsewhere.
+	stat, ok := info.Sys().(*syscall.Stat_t)
+	if !ok {
+		return "", fmt.Errorf("unable to determine binary ownership: %s", path)
+	} else if stat.Uid != 0 {
+		return "", fmt.Errorf("binary is not root owned: %s", path)
+	}
+
+	if info.Mode()&(os.ModeSetgid|os.ModeSetuid) != 0 {
+		return "", fmt.Errorf("SetUID or SetGID bits set: %s", path)
+	}
+
+	return path, nil
+}
+
+// runCmd executes cmd with args and returns its combined stdout+stderr.
+// On failure the (possibly partial) output buffer is still returned so
+// callers can surface diagnostics.
+func runCmd(cmd string, args ...string) (*bytes.Buffer, error) {
+	log.Infof("Attempting to run command: %s with args %v", cmd, strings.Join(args, " "))
+
+	command := exec.Command(cmd, args...)
+
+	output, err := command.CombinedOutput()
+	if err != nil {
+		log.Warnf("%v %v failed:\n%s", cmd, strings.Join(args, " "), output)
+		return bytes.NewBuffer(output), err
+	}
+
+	return bytes.NewBuffer(output), nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/info.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/info.go
new file mode 100644
index 000000000..7ed419dd5
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/info.go
@@ -0,0 +1,28 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+// Info holds the agent's name and version, exposed through accessors.
+type Info struct {
+	name    *string
+	version *string
+}
+
+// NewInfo constructs an Info from the given name and version.
+func NewInfo(name string, version string) *Info {
+	info := new(Info)
+	info.name = &name
+	info.version = &version
+	return info
+}
+
+// Name returns the agent name.
+func (info *Info) Name() string {
+	return *info.name
+}
+
+// Version returns the agent version.
+func (info *Info) Version() string {
+	return *info.version
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/message.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/message.go
new file mode 100644
index 000000000..c37fe9248
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/message.go
@@ -0,0 +1,43 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "strings"
+)
+
+// Payload is an opaque marker interface for message data of any type.
+type Payload interface {
+}
+
+// Message pairs a topic string with an arbitrary payload for the message pipe.
+type Message struct {
+	topic *string
+	data  *Payload
+}
+
+// NewMessage constructs a Message for the given topic and payload.
+func NewMessage(topic string, data Payload) *Message {
+	message := new(Message)
+	message.topic = &topic
+	message.data = &data
+	return message
+}
+
+// Match reports whether the message's topic starts with the given prefix.
+func (m *Message) Match(topic string) bool {
+	return strings.HasPrefix(*m.topic, topic)
+}
+
+// Exact reports whether the message's topic equals topic exactly.
+func (m *Message) Exact(topic string) bool {
+	return *m.topic == topic
+}
+
+// Topic returns the message's topic.
+func (m *Message) Topic() string {
+	return *m.topic
+}
+
+// Data returns the message's payload.
+func (m *Message) Data() Payload {
+	return *m.data
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/mock_pipe.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/mock_pipe.go
new file mode 100644
index 000000000..68f1eb823
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/mock_pipe.go
@@ -0,0 +1,100 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "context"
+ "testing"
+)
+
+// MockMessagePipe is a mock message pipe
+// It queues messages synchronously and records everything it processes so
+// tests can assert on the exact sequence of topics.
+type MockMessagePipe struct {
+	plugins           []Plugin
+	messages          []*Message
+	processedMessages []*Message
+	ctx               context.Context
+}
+
+// Compile-time check that the mock satisfies MessagePipeInterface.
+var _ MessagePipeInterface = &MockMessagePipe{}
+
+// SetupMockMessagePipe builds a mock pipe with the given plugins registered.
+func SetupMockMessagePipe(t *testing.T, ctx context.Context, plugin ...Plugin) *MockMessagePipe {
+	messagePipe := NewMockMessagePipe(ctx)
+
+	err := messagePipe.Register(10, plugin...)
+	if err != nil {
+		t.Fail()
+	}
+	return messagePipe
+}
+
+// ValidateMessages asserts the pipe processed exactly msgTopics, in order,
+// then clears the pipe's message history.
+func ValidateMessages(t *testing.T, messagePipe *MockMessagePipe, msgTopics []string) {
+	processedMessages := messagePipe.GetProcessedMessages()
+	if len(processedMessages) != len(msgTopics) {
+		t.Fatalf("expected %d messages, received %d: %+v", len(msgTopics), len(processedMessages), processedMessages)
+	}
+	for idx, msg := range processedMessages {
+		if msgTopics[idx] != msg.Topic() {
+			t.Errorf("unexpected message topic: %s :: should have been: %s", msg.Topic(), msgTopics[idx])
+		}
+	}
+	messagePipe.ClearMessages()
+}
+
+// NewMockMessagePipe constructs an empty mock pipe bound to ctx.
+func NewMockMessagePipe(ctx context.Context) *MockMessagePipe {
+	return &MockMessagePipe{
+		ctx: ctx,
+	}
+}
+
+// Register adds plugins; the size argument is accepted but ignored.
+func (p *MockMessagePipe) Register(size int, plugin ...Plugin) error {
+	p.plugins = append(p.plugins, plugin...)
+	return nil
+}
+
+// Context returns the context the pipe was constructed with.
+func (p *MockMessagePipe) Context() context.Context {
+	return p.ctx
+}
+
+// Process enqueues messages; they are not delivered until Run is called.
+func (p *MockMessagePipe) Process(msgs ...*Message) {
+	p.messages = append(p.messages, msgs...)
+}
+
+// GetMessages returns messages queued but not yet processed.
+func (p *MockMessagePipe) GetMessages() []*Message {
+	return p.messages
+}
+
+// GetProcessedMessages returns messages already delivered to plugins.
+func (p *MockMessagePipe) GetProcessedMessages() []*Message {
+	return p.processedMessages
+}
+
+// ClearMessages drops both the pending and processed message queues.
+func (p *MockMessagePipe) ClearMessages() {
+	p.processedMessages = []*Message{}
+	p.messages = []*Message{}
+}
+
+// Run initializes every plugin and then drains the queue synchronously.
+func (p *MockMessagePipe) Run() {
+	for _, plugin := range p.plugins {
+		plugin.Init(p)
+	}
+	p.RunWithoutInit()
+}
+
+// RunWithoutInit drains the queue, delivering each message to every plugin
+// in registration order. Messages enqueued during processing are handled too.
+func (p *MockMessagePipe) RunWithoutInit() {
+	var message *Message
+	for len(p.messages) > 0 {
+		message, p.messages = p.messages[0], p.messages[1:]
+		for _, plugin := range p.plugins {
+			plugin.Process(message)
+		}
+		p.processedMessages = append(p.processedMessages, message)
+	}
+}
+
+// GetPlugins returns the registered plugins.
+func (p *MockMessagePipe) GetPlugins() []Plugin {
+	return p.plugins
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/network/network.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/network/network.go
new file mode 100644
index 000000000..9e4bb88e6
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/network/network.go
@@ -0,0 +1,405 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package network
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/nginx/agent/sdk/v2/proto"
+)
+
+const (
+	IPv6len  = 16                 // bytes in an IPv6 address/netmask
+	hexDigit = "0123456789abcdef" // lookup table for appendHex
+	linuxFile = "/proc/net/route" // kernel routing table, parsed on Linux
+	FREEBSD  = "freebsd"
+	SOLARIS  = "solaris"
+	DARWIN   = "darwin"
+	LINUX    = "linux"
+)
+
+var (
+	// reOverflow captures the numeric counter preceding the
+	// "times the listen queue of a socket overflowed" line in netstat -s.
+	reOverflow        = regexp.MustCompile(`\s*(\d+)\s*`)
+	reTimesOverflowed = regexp.MustCompile("times the listen queue of a socket overflowed")
+)
+
+// routeStruct is one row of a routing table; field names mirror the
+// /proc/net/route column headers. Netstat-based parsers fill only a subset.
+type routeStruct struct {
+	Iface       string
+	Destination string
+	Gateway     string
+	Flags       string
+	RefCnt      string
+	Use         string
+	Metric      string
+	Mask        string
+	MTU         string
+	Window      string
+	IRTT        string
+}
+
+// Get net overflow. The command (netstat) to get net overflow may not be available on all platforms
+// Returns the number of listen-queue overflows, or an error on platforms
+// with no implementation (darwin, linux, and anything else).
+func GetNetOverflow() (float64, error) {
+	const (
+		Netstat      = "netstat"
+		NetstatFlags = "-s"
+	)
+	overflows := 0.0
+	switch runtime.GOOS {
+	case FREEBSD, SOLARIS:
+		// Both platforms expose the counter via `netstat -s`.
+		return freeBsdSolaris(Netstat, NetstatFlags, overflows)
+	default:
+		return overflows, errors.New("this operating system is not implemented")
+	}
+}
+
+// freeBsdSolaris runs `netstat -s` and extracts the socket listen-queue
+// overflow counter from the matching output line. overflows is the value
+// returned when no counter is found.
+func freeBsdSolaris(netstat string, flags string, overflows float64) (float64, error) {
+	netstatCmd := exec.Command(netstat, flags)
+	outbuf, err := netstatCmd.CombinedOutput()
+
+	if err != nil {
+		errMsg := fmt.Sprintf("netstat not available: %v", err)
+		log.Debug(errMsg)
+		return overflows, errors.New(errMsg)
+	}
+
+	// Collect every line mentioning the overflow counter.
+	scanner := bufio.NewScanner(strings.NewReader(string(outbuf)))
+	matches := []string{}
+	for scanner.Scan() {
+		line := scanner.Text()
+		if reTimesOverflowed.MatchString(line) {
+			matches = append(matches, line)
+		}
+	}
+	so := strings.Join(matches, "\n")
+
+	// First number in the matched text is the counter; parse failure
+	// deliberately leaves the default value.
+	ofm := reOverflow.FindStringSubmatch(so)
+	if len(ofm) > 1 {
+		overflows, _ = strconv.ParseFloat(ofm[1], 64)
+	}
+
+	return overflows, nil
+}
+
+// GetDataplaneNetworks enumerates host network interfaces with their IPv4 and
+// IPv6 addresses and determines the default interface. Interfaces with no
+// addresses are skipped. On enumeration failure an empty Network is returned.
+func GetDataplaneNetworks() (res *proto.Network) {
+	const (
+		NetmaskFormat = "%v.%v.%v.%v"
+	)
+	ifs, err := net.Interfaces()
+	if err != nil {
+		log.Errorf("error getting network interfaces on host: %v", err)
+		return &proto.Network{}
+	}
+
+	interfaces := []*proto.NetworkInterface{}
+	for _, netInterface := range ifs {
+		networkInterface := &proto.NetworkInterface{
+			Mac:  netInterface.HardwareAddr.String(),
+			Name: netInterface.Name,
+		}
+		ipv4Addrs := make([]*proto.Address, 0)
+		ipv6Addrs := make([]*proto.Address, 0)
+
+		addrs, err := netInterface.Addrs()
+		if err != nil || len(addrs) == 0 {
+			// don't care about things without addrs
+			continue
+		}
+		for _, a := range addrs {
+			v, ok := a.(*net.IPNet)
+			if !ok {
+				continue
+			}
+			// mask is the prefix length in bits (the "ones" count).
+			mask, _ := net.IPMask.Size(v.Mask)
+
+			addr := &proto.Address{}
+			addr.Address = v.IP.String()
+			addr.Prefixlen = int64(mask)
+
+			// To4 distinguishes v4 from v6; format the netmask accordingly.
+			if v.IP.To4() != nil {
+				addr.Netmask = fmt.Sprintf(NetmaskFormat, v.Mask[0], v.Mask[1], v.Mask[2], v.Mask[3])
+				ipv4Addrs = append(ipv4Addrs, addr)
+			} else {
+				addr.Netmask = ipv6ToStr(v.Mask)
+				ipv6Addrs = append(ipv6Addrs, addr)
+			}
+		}
+		networkInterface.Ipv4 = ipv4Addrs
+		networkInterface.Ipv6 = ipv6Addrs
+		interfaces = append(interfaces, networkInterface)
+	}
+
+	defaultNetworkInterface, err := getDefaultNetworkInterfaceCrossPlatform()
+	if err != nil {
+		log.Debugf("Error getting default network interface, %v", err)
+	}
+
+	// Fall back to the first interface when no default could be determined.
+	if defaultNetworkInterface == "" && len(ifs) > 0 {
+		defaultNetworkInterface = ifs[0].Name
+	}
+	return &proto.Network{Interfaces: interfaces, Default: defaultNetworkInterface}
+}
+
+// getDefaultNetworkInterfaceCrossPlatform returns the name of the interface
+// carrying the default route, using the appropriate mechanism per OS:
+// netstat -rn (FreeBSD/Solaris), /sbin/route (Darwin), /proc/net/route (Linux).
+func getDefaultNetworkInterfaceCrossPlatform() (string, error) {
+	const (
+		SBinRoute        = "/sbin/route"
+		SBinFlags        = "-n"
+		SBinCommand      = "get"
+		SBinDefaultRoute = "0.0.0.0"
+		Netstat          = "netstat"
+		NetstatFlags     = "-rn"
+	)
+	switch runtime.GOOS {
+	case FREEBSD:
+		return getInterfaceUsing(Netstat, NetstatFlags)
+	case SOLARIS:
+		return getInterfaceUsing(Netstat, NetstatFlags)
+	case DARWIN:
+		// `route -n get 0.0.0.0` prints key: value lines describing the route.
+		routeCmd := exec.Command(SBinRoute, SBinFlags, SBinCommand, SBinDefaultRoute)
+		output, err := routeCmd.CombinedOutput()
+		if err != nil {
+			return "", err
+		}
+		routeStruct, err := parseToSbinRouteStruct(output)
+		if err != nil {
+			return "", err
+		}
+		return routeStruct.Iface, nil
+	case LINUX:
+		f, err := os.Open(linuxFile)
+		if err != nil {
+			return "", fmt.Errorf("Can't access %s", linuxFile)
+		}
+		defer f.Close()
+
+		output, err := ioutil.ReadAll(f)
+		if err != nil {
+			return "", fmt.Errorf("Can't read contents of %s", linuxFile)
+		}
+
+		parsedStruct, err := parseToLinuxRouteStruct(output)
+		if err != nil {
+			return "", err
+		}
+
+		return parsedStruct.Iface, nil
+	default:
+		return "", errors.New("this operating system is not implemented")
+	}
+}
+
+// getInterfaceUsing runs the given netstat command and returns the interface
+// of the parsed default route.
+func getInterfaceUsing(netstat string, flags string) (string, error) {
+	netstatCmd := exec.Command(netstat, flags)
+	output, err := netstatCmd.CombinedOutput()
+	if err != nil {
+		return "", err
+	}
+	routeStruct, err := parseNetstatToRouteStruct(output)
+	if err != nil {
+		return "", err
+	}
+	return routeStruct.Iface, nil
+}
+
+// parseToSbinRouteStruct parses `route -n get` output (Darwin), which is a
+// series of "key: value" lines, into a routeStruct. It errors when no
+// interface line is present.
+func parseToSbinRouteStruct(output []byte) (routeStruct, error) {
+	const (
+		DestinationStr = "destination:"
+		MaskStr        = "mask:"
+		GatewayStr     = "gateway:"
+		InterfaceStr   = "interface:"
+		FlagsStr       = "flags:"
+	)
+	var err error
+	rs := routeStruct{}
+	lines := strings.Split(string(output), "\n")
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		if len(fields) > 0 {
+			// First field is the key; second (when present) is the value.
+			switch index := fields[0]; index {
+			case DestinationStr:
+				rs.Destination = fields[1]
+			case MaskStr:
+				rs.Mask = fields[1]
+			case GatewayStr:
+				rs.Gateway = fields[1]
+			case InterfaceStr:
+				rs.Iface = fields[1]
+			case FlagsStr:
+				rs.Flags = fields[1]
+			default:
+				continue
+			}
+		}
+	}
+	if rs.Iface == "" {
+		err = errors.New("unable to determine default interface")
+	} else {
+		err = nil
+	}
+
+	return rs, err
+}
+
+// parseNetstatToRouteStruct parses `netstat -rn` output (FreeBSD/Solaris) and
+// returns the route whose destination is "default". Routes carrying the "I"
+// (interface) flag are skipped.
+func parseNetstatToRouteStruct(output []byte) (routeStruct, error) {
+	const (
+		destinationField = 0
+		gatewayField     = 1
+		flagsField       = 2
+		interfaceField   = 3
+		interfaceFlag    = "I"
+		searchString     = "default"
+	)
+	outputLines := strings.Split(string(output), "\n")
+	for _, line := range outputLines {
+		fields := strings.Fields(line)
+		// Require all columns up to the interface field before indexing.
+		// The previous guard (len >= 2) still allowed out-of-range access
+		// to fields[flagsField]/fields[interfaceField] on a short line.
+		// This check also prevents hitting the first 3 lines of netstat output.
+		if len(fields) > interfaceField && fields[destinationField] == searchString {
+			if !strings.ContainsAny(fields[flagsField], interfaceFlag) {
+				return routeStruct{
+					Iface:       fields[interfaceField],
+					Destination: fields[destinationField],
+					Gateway:     fields[gatewayField],
+					Flags:       fields[flagsField],
+				}, nil
+			}
+		}
+	}
+
+	return routeStruct{}, errors.New("unable to determine default interface")
+}
+
+// Referenced from https://github.com/jackpal/gateway and adapted
+// parseToLinuxRouteStruct parses /proc/net/route content and returns the row
+// whose hex destination is 0, i.e. the default route.
+func parseToLinuxRouteStruct(output []byte) (routeStruct, error) {
+	const (
+		destinationField = 1 // field containing hex destination address
+	)
+	scanner := bufio.NewScanner(bytes.NewReader(output))
+
+	// Skip the single header line. NOTE: the previous version skipped the
+	// header here AND skipped the first loop iteration via a line counter,
+	// silently discarding the first data row — which is typically where the
+	// default route lives.
+	if !scanner.Scan() {
+		return routeStruct{}, errors.New("Invalid linux route file")
+	}
+
+	for scanner.Scan() {
+		row := scanner.Text()
+		tokens := strings.Fields(strings.TrimSpace(row))
+		if len(tokens) < 11 {
+			return routeStruct{}, fmt.Errorf("invalid row '%s' in route file: doesn't have 11 fields", row)
+		}
+
+		// Cast hex destination address to int
+		destinationHex := "0x" + tokens[destinationField]
+		destination, err := strconv.ParseInt(destinationHex, 0, 64)
+		if err != nil {
+			return routeStruct{}, fmt.Errorf(
+				"parsing destination field hex '%s' in row '%s': %w",
+				destinationHex,
+				row,
+				err,
+			)
+		}
+
+		// The default interface is the one that's 0
+		if destination != 0 {
+			continue
+		}
+
+		return routeStruct{
+			Iface:       tokens[0],
+			Destination: tokens[1],
+			Gateway:     tokens[2],
+			Flags:       tokens[3],
+			RefCnt:      tokens[4],
+			Use:         tokens[5],
+			Metric:      tokens[6],
+			Mask:        tokens[7],
+			MTU:         tokens[8],
+			Window:      tokens[9],
+			IRTT:        tokens[10],
+		}, nil
+	}
+	return routeStruct{}, errors.New("interface with default destination not found")
+}
+
+// ipv6ToStr formats a 16-byte IPv6 address/netmask in canonical colon-hex
+// notation, compressing the longest run of zero groups to "::"
+// (mirrors the unexported formatting logic in the net package).
+func ipv6ToStr(ip []byte) string {
+	p := ip
+	// Find longest run of zeros.
+	e0 := -1
+	e1 := -1
+	for i := 0; i < IPv6len; i += 2 {
+		j := i
+		for j < IPv6len && p[j] == 0 && p[j+1] == 0 {
+			j += 2
+		}
+		if j > i && j-i > e1-e0 {
+			e0 = i
+			e1 = j
+			i = j
+		}
+	}
+	// The symbol "::" MUST NOT be used to shorten just one 16 bit 0 field.
+	if e1-e0 <= 2 {
+		e0 = -1
+		e1 = -1
+	}
+
+	const maxLen = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
+	b := make([]byte, 0, maxLen)
+
+	// Return with possible :: in place of run of zeros
+	for i := 0; i < IPv6len; i += 2 {
+		if i == e0 {
+			b = append(b, ':', ':')
+			i = e1
+			if i >= IPv6len {
+				break
+			}
+		} else if i > 0 {
+			b = append(b, ':')
+		}
+		// Each group is two bytes, big-endian.
+		b = appendHex(b, (uint32(p[i])<<8)|uint32(p[i+1]))
+	}
+	return string(b)
+}
+
+// appendHex appends i to dst in lowercase hex with leading zeros stripped
+// ("0" for zero).
+func appendHex(dst []byte, i uint32) []byte {
+	if i == 0 {
+		return append(dst, '0')
+	}
+	for j := 7; j >= 0; j-- {
+		v := i >> uint(j*4)
+		if v > 0 {
+			dst = append(dst, hexDigit[v&0xf])
+		}
+	}
+	return dst
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/nginx.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/nginx.go
new file mode 100644
index 000000000..7a76e74cd
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/nginx.go
@@ -0,0 +1,847 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/nginx/agent/sdk/v2"
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/nginx/agent/v2/src/core/config"
+)
+
+const (
+	withWithPrefix        = "with-"            // prefix of static ./configure modules
+	withModuleSuffix      = "module"           // suffix identifying module arguments
+	defaultNginxOssPrefix = "/usr/local/nginx" // default --prefix for OSS builds
+)
+
+var (
+	logMutex    sync.Mutex
+	unpackMutex sync.Mutex
+	// re matches "name/version" pairs from `nginx -v` style output; plusre
+	// additionally captures the NGINX Plus token in trailing parentheses.
+	// The named capture groups were corrupted to "(?P\S+)" — invalid Go
+	// regexp syntax that panics in MustCompile at init — and are restored.
+	re     = regexp.MustCompile(`(?P<name>\S+)/(?P<version>\S+)`)
+	plusre = regexp.MustCompile(`(?P<name>\S+)/(?P<version>\S+).\((?P<plus>\S+plus\S+)\)`)
+)
+
+// NginxBinary abstracts discovery, lifecycle control (start/stop/reload),
+// config read/write/validation, and log tracking for NGINX instances.
+type NginxBinary interface {
+	Start(nginxId, bin string) error
+	Stop(processId, bin string) error
+	Reload(processId, bin string) error
+	ValidateConfig(processId, bin, configLocation string, config *proto.NginxConfig, configApply *sdk.ConfigApply) error
+	GetNginxDetailsFromProcess(nginxProcess Process) *proto.NginxDetails
+	GetNginxDetailsByID(nginxID string) *proto.NginxDetails
+	GetNginxIDForProcess(nginxProcess Process) string
+	GetNginxDetailsMapFromProcesses(nginxProcesses []Process) map[string]*proto.NginxDetails
+	UpdateNginxDetailsFromProcesses(nginxProcesses []Process)
+	WriteConfig(config *proto.NginxConfig) (*sdk.ConfigApply, error)
+	ReadConfig(path, nginxId, systemId string) (*proto.NginxConfig, error)
+	UpdateLogs(existingLogs map[string]string, newLogs map[string]string) bool
+	GetAccessLogs() map[string]string
+	GetErrorLogs() map[string]string
+	GetChildProcesses() map[string][]*proto.NginxDetails
+}
+
+// NginxBinaryType is the concrete NginxBinary implementation. The two
+// mutexes guard the masters map (nginxDetailsMap) and workers map
+// (nginxWorkersMap) respectively, which are rebuilt on each process scan.
+type NginxBinaryType struct {
+	detailsMapMutex   sync.Mutex
+	workersMapMutex   sync.Mutex
+	env               Environment
+	config            *config.Config
+	nginxDetailsMap   map[string]*proto.NginxDetails
+	nginxWorkersMap   map[string][]*proto.NginxDetails
+	nginxInfoMap      map[string]*nginxInfo
+	accessLogs        map[string]string
+	errorLogs         map[string]string
+	accessLogsUpdated bool
+	errorLogsUpdated  bool
+}
+
+// nginxInfo caches everything parsed from `nginx -V` for one binary,
+// keyed by binary path and invalidated via the binary's mtime.
+type nginxInfo struct {
+	version         string
+	mtime           time.Time
+	plusver         string
+	source          string
+	prefix          string
+	confPath        string
+	logPath         string
+	errorPath       string
+	ssl             []string
+	cfgf            map[string]interface{}
+	configureArgs   []string
+	loadableModules []string
+	modulesPath     string
+}
+
+// NewNginxBinary constructs an NginxBinaryType with empty caches.
+func NewNginxBinary(env Environment, config *config.Config) *NginxBinaryType {
+	return &NginxBinaryType{
+		env:          env,
+		nginxInfoMap: make(map[string]*nginxInfo),
+		accessLogs:   make(map[string]string),
+		errorLogs:    make(map[string]string),
+		config:       config,
+	}
+}
+
+// GetNginxDetailsMapFromProcesses returns the cached master-details map;
+// the nginxProcesses argument is currently unused (cache is refreshed via
+// UpdateNginxDetailsFromProcesses).
+func (n *NginxBinaryType) GetNginxDetailsMapFromProcesses(nginxProcesses []Process) map[string]*proto.NginxDetails {
+	n.detailsMapMutex.Lock()
+	defer n.detailsMapMutex.Unlock()
+	return n.nginxDetailsMap
+}
+
+// UpdateNginxDetailsFromProcesses rebuilds both the master and worker detail
+// maps, keyed by nginx ID, from the given process snapshot.
+func (n *NginxBinaryType) UpdateNginxDetailsFromProcesses(nginxProcesses []Process) {
+	n.detailsMapMutex.Lock()
+	defer n.detailsMapMutex.Unlock()
+	n.nginxDetailsMap = map[string]*proto.NginxDetails{}
+
+	n.workersMapMutex.Lock()
+	defer n.workersMapMutex.Unlock()
+	n.nginxWorkersMap = map[string][]*proto.NginxDetails{}
+
+	for _, process := range nginxProcesses {
+		nginxDetails := n.GetNginxDetailsFromProcess(process)
+		if process.IsMaster {
+			n.nginxDetailsMap[nginxDetails.GetNginxId()] = nginxDetails
+		} else {
+			n.nginxWorkersMap[nginxDetails.GetNginxId()] = append(n.nginxWorkersMap[nginxDetails.GetNginxId()], nginxDetails)
+		}
+	}
+}
+
+// GetChildProcesses returns the cached worker-details map, keyed by nginx ID.
+func (n *NginxBinaryType) GetChildProcesses() map[string][]*proto.NginxDetails {
+	n.workersMapMutex.Lock()
+	defer n.workersMapMutex.Unlock()
+	return n.nginxWorkersMap
+}
+
+// GetNginxIDForProcess computes the stable nginx ID for a process, derived
+// from its binary path, conf path and prefix (via `nginx -V` info).
+func (n *NginxBinaryType) GetNginxIDForProcess(nginxProcess Process) string {
+	defaulted := n.sanitizeProcessPath(&nginxProcess)
+	info := n.getNginxInfoFrom(nginxProcess.Path)
+
+	// reset the process path from the default to what NGINX tells us
+	if defaulted &&
+		info.cfgf["sbin-path"] != nil &&
+		nginxProcess.Path != info.cfgf["sbin-path"] {
+		nginxProcess.Path = info.cfgf["sbin-path"].(string)
+	}
+
+	return n.getNginxIDFromProcessInfo(nginxProcess, info)
+}
+
+// getNginxIDFromProcessInfo hashes (binary path, conf path, prefix) into
+// the nginx ID.
+func (n *NginxBinaryType) getNginxIDFromProcessInfo(nginxProcess Process, info *nginxInfo) string {
+	return GenerateNginxID("%s_%s_%s", nginxProcess.Path, info.confPath, info.prefix)
+}
+
+// GetNginxDetailsByID returns the cached details for the given nginx ID,
+// or nil when unknown.
+func (n *NginxBinaryType) GetNginxDetailsByID(nginxID string) *proto.NginxDetails {
+	n.detailsMapMutex.Lock()
+	defer n.detailsMapMutex.Unlock()
+	return n.nginxDetailsMap[nginxID]
+}
+
+// sanitizeProcessPath fills an empty process path from $PATH and strips the
+// "(deleted)" marker left after an in-place binary upgrade. Returns true
+// when the path had to be defaulted.
+func (n *NginxBinaryType) sanitizeProcessPath(nginxProcess *Process) bool {
+	defaulted := false
+	if nginxProcess.Path == "" {
+		nginxProcess.Path = defaultToNginxCommandForProcessPath()
+		defaulted = true
+	}
+	if strings.Contains(nginxProcess.Path, execDeleted) {
+		log.Debugf("nginx was upgraded (process), using new info")
+		nginxProcess.Path = sanitizeExecDeletedPath(nginxProcess.Path)
+	}
+	return defaulted
+}
+
+// GetNginxDetailsFromProcess assembles the full NginxDetails for a process:
+// identity, version, conf path (honoring a -c override on the command line),
+// modules, Plus/SSL info, and the status API URL when configured.
+func (n *NginxBinaryType) GetNginxDetailsFromProcess(nginxProcess Process) *proto.NginxDetails {
+	defaulted := n.sanitizeProcessPath(&nginxProcess)
+	info := n.getNginxInfoFrom(nginxProcess.Path)
+
+	// reset the process path from the default to what NGINX tells us
+	if defaulted &&
+		info.cfgf["sbin-path"] != nil &&
+		nginxProcess.Path != info.cfgf["sbin-path"] {
+		nginxProcess.Path = info.cfgf["sbin-path"].(string)
+	}
+
+	nginxID := n.getNginxIDFromProcessInfo(nginxProcess, info)
+	log.Tracef("NGINX %s %s %s %v nginxID=%s conf=%s", info.plusver, info.source, info.ssl, info.cfgf, nginxID, info.confPath)
+
+	nginxDetailsFacade := &proto.NginxDetails{
+		NginxId:         nginxID,
+		Version:         info.version,
+		ConfPath:        info.confPath,
+		ProcessId:       fmt.Sprintf("%d", nginxProcess.Pid),
+		ProcessPath:     nginxProcess.Path,
+		StartTime:       nginxProcess.CreateTime,
+		BuiltFromSource: false,
+		LoadableModules: info.loadableModules,
+		RuntimeModules:  runtimeFromConfigure(info.configureArgs),
+		Plus:            buildPlus(info.plusver),
+		Ssl:             buildSsl(info.ssl, info.source),
+		ConfigureArgs:   info.configureArgs,
+	}
+
+	// A `-c <path>` on the process command line overrides the compiled-in
+	// conf path.
+	if path := getConfPathFromCommand(nginxProcess.Command); path != "" {
+		log.Tracef("Custom conf path set: %v", path)
+		nginxDetailsFacade.ConfPath = path
+	}
+
+	url, err := sdk.GetStatusApiInfo(nginxDetailsFacade.ConfPath)
+	if err != nil {
+		log.Tracef("Unable to get status api from the configuration: NGINX metrics will be unavailable for this system. please configure a status API to get NGINX metrics: %v", err)
+	}
+	nginxDetailsFacade.StatusUrl = url
+
+	return nginxDetailsFacade
+}
+
+// defaultToNginxCommandForProcessPath resolves "nginx" via $PATH when the
+// process path could not be read; returns "" when nginx is not on $PATH.
+func defaultToNginxCommandForProcessPath() string {
+	log.Debug("Defaulting to NGINX on path")
+
+	// LookPath figures out the full path of the binary using the $PATH
+	// command is not portable
+	path, err := exec.LookPath("nginx")
+	if err != nil {
+		log.Warnf("Unable to find the correct NGINX binary in $PATH: %v", err)
+		return ""
+	}
+	return path
+}
+
+// Start starts NGINX.
+func (n *NginxBinaryType) Start(nginxId, bin string) error {
+	log.Infof("Starting NGINX Id: %s Bin: %s", nginxId, bin)
+
+	_, err := runCmd(bin)
+	if err != nil {
+		log.Errorf("Starting NGINX caused error: %v", err)
+	} else {
+		log.Infof("NGINX Id: %s Started", nginxId)
+	}
+
+	return err
+}
+
+// Reload NGINX.
+// Sends SIGHUP directly to the master process rather than invoking `nginx -s reload`.
+func (n *NginxBinaryType) Reload(processId, bin string) error {
+	log.Infof("Reloading NGINX: %s PID: %s", bin, processId)
+	intProcess, err := strconv.Atoi(processId)
+	if err != nil {
+		log.Errorf("Reloading NGINX caused error when trying to determine process id: %v", err)
+		return err
+	}
+
+	// SIGHUP triggers a graceful configuration reload in the nginx master.
+	err = syscall.Kill(intProcess, syscall.SIGHUP)
+	if err != nil {
+		log.Errorf("Reloading NGINX caused error: %v", err)
+	} else {
+		log.Infof("NGINX with process Id: %s reloaded", processId)
+	}
+	return err
+}
+
+// ValidateConfig tests the config with nginx -t -c configLocation.
+// On failure the config files are re-written to the backup so the failed
+// state can be inspected/rolled back.
+func (n *NginxBinaryType) ValidateConfig(processId, bin, configLocation string, config *proto.NginxConfig, configApply *sdk.ConfigApply) error {
+	log.Debugf("Validating config, %s for nginx process, %s", configLocation, processId)
+	response, err := runCmd(bin, "-t", "-c", configLocation)
+	if err != nil {
+		confFiles, auxFiles, getNginxConfigFilesErr := sdk.GetNginxConfigFiles(config)
+		if getNginxConfigFilesErr == nil {
+			n.writeBackup(config, confFiles, auxFiles)
+		}
+		return fmt.Errorf("error running nginx -t -c %v:\n%s", configLocation, response)
+	}
+
+	log.Infof("Config validated:\n%s", response)
+
+	return nil
+}
+
+// Stop stops an instance of NGINX.
+func (n *NginxBinaryType) Stop(processId, bin string) error {
+	log.Info("Stopping NGINX")
+
+	_, err := runCmd(bin, "-s", "stop")
+	if err != nil {
+		log.Errorf("Stopping NGINX caused error: %v", err)
+	} else {
+		log.Infof("NGINX with process Id: %s stopped", processId)
+	}
+
+	return err
+}
+
+func ensureFilesAllowed(files []*proto.File, allowList map[string]struct{}, path string) error {
+ for _, file := range files {
+ filename := file.Name
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(path, filename)
+ }
+ log.Tracef("checking file %s is allowed", filename)
+ if !allowedFile(filename, allowList) {
+ return fmt.Errorf("the file %s is outside the allowed directory list", filename)
+ }
+ }
+ return nil
+}
+
+func hasConfPath(files []*proto.File, confPath string) bool {
+ confDir := filepath.Dir(confPath)
+ for _, file := range files {
+ filename := file.Name
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(confDir, filename)
+ }
+ if filename == confPath {
+ return true
+ }
+ }
+ return false
+}
+
// WriteConfig applies the given NginxConfig payload to disk for the NGINX
// instance it names: it validates every target path against the allowed
// directory list, writes conf and aux files through a ConfigApply
// transaction, and deletes files the new config no longer references. The
// returned ConfigApply lets the caller complete or roll back the change.
func (n *NginxBinaryType) WriteConfig(config *proto.NginxConfig) (*sdk.ConfigApply, error) {
	log.Tracef("Writing config: %+v\n", config)
	details, ok := n.nginxDetailsMap[config.ConfigData.NginxId]
	if !ok || details == nil {
		return nil, fmt.Errorf("NGINX instance %s not found", config.ConfigData.NginxId)
	}

	// Snapshot of the config currently on disk; used below to work out which
	// files disappear in the incoming config.
	systemNginxConfig, err := sdk.GetNginxConfig(
		details.ConfPath,
		config.ConfigData.NginxId,
		config.ConfigData.SystemId,
		n.config.AllowedDirectoriesMap,
	)
	if err != nil {
		return nil, err
	}

	if !allowedFile(filepath.Dir(details.ConfPath), n.config.AllowedDirectoriesMap) {
		return nil, fmt.Errorf("config directory %s not allowed", filepath.Dir(details.ConfPath))
	}

	confFiles, auxFiles, err := sdk.GetNginxConfigFiles(config)
	if err != nil {
		return nil, err
	}

	// Ensure this config request does not remove the process config
	if !hasConfPath(confFiles, details.ConfPath) {
		return nil, fmt.Errorf("should not delete %s", details.ConfPath)
	}

	// Ensure all config files are within the allowed list directories.
	confDir := filepath.Dir(details.ConfPath)
	if err := ensureFilesAllowed(confFiles, n.config.AllowedDirectoriesMap, confDir); err != nil {
		return nil, err
	}

	// Ensure all aux files are within the allowed list directories.
	if err := ensureFilesAllowed(auxFiles, n.config.AllowedDirectoriesMap, config.GetZaux().GetRootDirectory()); err != nil {
		return nil, err
	}

	// Serialize on-disk config mutation across goroutines.
	unpackMutex.Lock()
	defer unpackMutex.Unlock()

	log.Info("Updating NGINX config")
	var configApply *sdk.ConfigApply
	configApply, err = sdk.NewConfigApply(details.ConfPath, n.config.AllowedDirectoriesMap)
	if err != nil {
		log.Warnf("config_apply error: %s", err)
		return nil, err
	}

	// TODO: return to Control Plane that there was a rollback
	err = n.env.WriteFiles(configApply, confFiles, filepath.Dir(details.ConfPath), n.config.AllowedDirectoriesMap)
	if err != nil {
		log.Warnf("configuration write failed: %s", err)
		// Keep a debug copy of the rejected config (no-op unless debug mode).
		n.writeBackup(config, confFiles, auxFiles)
		return configApply, err
	}

	if len(auxFiles) > 0 {
		auxPath := config.GetZaux().GetRootDirectory()
		err = n.env.WriteFiles(configApply, auxFiles, auxPath, n.config.AllowedDirectoriesMap)
		if err != nil {
			log.Warnf("Auxiliary files write failed: %s", err)
			return configApply, err
		}
	}

	// Prefer explicit per-file delete actions from the directory map; fall
	// back to diffing the on-disk map against the incoming one.
	filesToDelete, ok := generateDeleteFromDirectoryMap(config.DirectoryMap, n.config.AllowedDirectoriesMap)
	if ok {
		log.Debugf("use explicit set action for delete files %s", filesToDelete)
	} else {
		// Delete files that are not in the directory map
		filesToDelete = getDirectoryMapDiff(systemNginxConfig.DirectoryMap.Directories, config.DirectoryMap.Directories)
	}

	// Track deletions so duplicate entries in filesToDelete are handled once.
	fileDeleted := make(map[string]struct{})
	for _, file := range filesToDelete {
		log.Infof("Deleting file: %s", file)
		if _, ok = fileDeleted[file]; ok {
			continue
		}

		if found, foundErr := FileExists(file); !found {
			if foundErr == nil {
				log.Debugf("skip delete for non-existing file: %s", file)
				continue
			}
			// possible perm deny, depends on platform
			log.Warnf("file exists returned for %s: %s", file, foundErr)
			return configApply, foundErr
		}
		// Save the file into the ConfigApply transaction so a rollback can
		// restore it before removing it from disk.
		if err = configApply.MarkAndSave(file); err != nil {
			return configApply, err
		}
		if err = os.Remove(file); err != nil {
			return configApply, err
		}
		fileDeleted[file] = struct{}{}
	}

	return configApply, nil
}
+
+// generateDeleteFromDirectoryMap return a list of delete files from the directory map where Action File_delete is set.
+// This supports incremental upgrade if the files in the DirectoryMap doesn't have any action set to a non-default value,
+// in which the return bool will be false, to indicate explicit action is not set in the provided DirectoryMap.
+func generateDeleteFromDirectoryMap(
+ directoryMap *proto.DirectoryMap,
+ allowedDirectory map[string]struct{},
+) ([]string, bool) {
+ actionIsSet := false
+ if directoryMap == nil {
+ return nil, actionIsSet
+ }
+ deleteFiles := make([]string, 0)
+ for _, dir := range directoryMap.Directories {
+ for _, f := range dir.Files {
+ if f.Action == proto.File_unset {
+ continue
+ }
+ actionIsSet = true
+ if f.Action != proto.File_delete {
+ continue
+ }
+ path := filepath.Join(dir.Name, f.Name)
+ if !filepath.IsAbs(path) {
+ // can't assume relative path
+ continue
+ }
+ if !allowedFile(path, allowedDirectory) {
+ continue
+ }
+ deleteFiles = append(deleteFiles, path)
+ }
+ }
+ return deleteFiles, actionIsSet
+}
+
+func (n *NginxBinaryType) ReadConfig(confFile, nginxId, systemId string) (*proto.NginxConfig, error) {
+ configPayload, err := sdk.GetNginxConfig(confFile, nginxId, systemId, n.config.AllowedDirectoriesMap)
+ if err != nil {
+ return nil, err
+ }
+
+ // get access logs list for analysis
+ accessLogs := AccessLogs(configPayload)
+ // get error logs list for analysis
+ errorLogs := ErrorLogs(configPayload)
+
+ logMutex.Lock()
+ defer logMutex.Unlock()
+
+ n.accessLogsUpdated = n.UpdateLogs(n.accessLogs, accessLogs)
+ n.errorLogsUpdated = n.UpdateLogs(n.errorLogs, errorLogs)
+
+ return configPayload, nil
+}
+
+func (n *NginxBinaryType) GetAccessLogs() map[string]string {
+ logMutex.Lock()
+ defer logMutex.Unlock()
+ return n.accessLogs
+}
+
+func (n *NginxBinaryType) GetErrorLogs() map[string]string {
+ logMutex.Lock()
+ defer logMutex.Unlock()
+ return n.errorLogs
+}
+
+// SkipLog checks if a logfile should be omitted from analysis
+func (n *NginxBinaryType) SkipLog(filename string) bool {
+ if n.config != nil {
+ for _, filter := range strings.Split(n.config.Nginx.ExcludeLogs, ",") {
+ ok, err := filepath.Match(filter, filename)
+ if err != nil {
+ log.Error("invalid path spec for excluding access_log: ", filter)
+ } else if ok {
+ log.Debugf("excluding access log %q as specified by filter: %q", filename, filter)
+ return true
+ }
+ }
+ }
+ return false
+}
+
// writeBackup writes a copy of the (failed) configuration and its aux files
// under a timestamped directory in /tmp for post-mortem debugging. It is a
// no-op unless debug mode is enabled in the agent's NGINX settings.
func (n *NginxBinaryType) writeBackup(config *proto.NginxConfig, confFiles []*proto.File, auxFiles []*proto.File) {
	if n.config.Nginx.Debug {
		// Writes below are restricted to /tmp via allowedDirs. NOTE(review):
		// the ConfigApply is created with the agent's AllowedDirectoriesMap
		// instead of allowedDirs — confirm this mismatch is intentional.
		allowedDirs := map[string]struct{}{"/tmp": {}}
		path := filepath.Join("/tmp", strconv.FormatInt(time.Now().Unix(), 10))

		configApply, err := sdk.NewConfigApply("/tmp", n.config.AllowedDirectoriesMap)
		if err != nil {
			log.Warnf("config_apply error: %s", err)
			return
		}

		log.Tracef("Writing failed configuration to %s", path)

		// Rebase conf file names from the config root onto the backup dir.
		confFilesCopy := deepCopyWithNewPath(confFiles, config.Zconfig.RootDirectory, path)

		err = n.env.WriteFiles(configApply, confFilesCopy, path, allowedDirs)
		if err != nil {
			log.Warnf("Error writing to config %s", err)
		}

		// NOTE(review): aux files are rebased from the Zconfig root here,
		// while WriteConfig places aux files under GetZaux().GetRootDirectory()
		// — confirm whether the Zaux root should be used instead.
		auxFilesCopy := deepCopyWithNewPath(auxFiles, config.Zconfig.RootDirectory, path)

		err = n.env.WriteFiles(configApply, auxFilesCopy, path, allowedDirs)
		if err != nil {
			log.Warnf("Error writing to aux %s", err)
		}

		if err = configApply.Complete(); err != nil {
			log.Errorf("Backup config complete failed: %v", err)
		}
	}
}
+
+func deepCopyWithNewPath(files []*proto.File, oldPath, newPath string) []*proto.File {
+ filesCopy := make([]*proto.File, len(files))
+ for index, file := range files {
+ filesCopy[index] = &proto.File{
+ Name: strings.ReplaceAll(file.Name, oldPath, newPath),
+ Lines: file.Lines,
+ Mtime: file.Mtime,
+ Permissions: file.Permissions,
+ Size_: file.Size_,
+ Contents: file.Contents,
+ XXX_NoUnkeyedLiteral: file.XXX_NoUnkeyedLiteral,
+ XXX_unrecognized: file.XXX_unrecognized,
+ XXX_sizecache: file.XXX_sizecache,
+ }
+ }
+ return filesCopy
+}
+
// getConfPathFromCommand extracts the argument of the "-c" flag from a
// space-separated nginx command line. It returns an empty string when no
// "-c" flag (or no value after it) is present.
func getConfPathFromCommand(command string) string {
	parts := strings.Split(command, " ")
	for i := 0; i+1 < len(parts); i++ {
		if parts[i] == "-c" {
			return parts[i+1]
		}
	}
	return ""
}
+
// parseConfigureArguments parses the "configure arguments:" line emitted by
// `nginx -V`. It returns a map of flag name to value (boolean true for
// value-less flags) plus the raw list of " --"-separated flag strings.
func parseConfigureArguments(line string) (result map[string]interface{}, flags []string) {
	// The first element of this split is whatever precedes the first " --"
	// (usually empty) and is filtered out by the empty-name check below.
	flags = strings.Split(line[len("configure arguments:"):], " --")
	result = map[string]interface{}{}
	for _, flag := range flags {
		// Split on the first '=' only: option values such as
		// "--with-cc-opt=-DFOO=1" legitimately contain '=' themselves and
		// were previously dropped entirely by an unbounded Split.
		vals := strings.SplitN(flag, "=", 2)
		switch len(vals) {
		case 1:
			if vals[0] != "" {
				result[vals[0]] = true
			}
		case 2:
			result[vals[0]] = vals[1]
		}
	}
	return result, flags
}
+
// getNginxInfoFrom returns the parsed `nginx -V` information for the given
// executable path, using a per-path cache that is invalidated when the
// binary's mtime changes. An empty nginxInfo is returned when the path is
// empty or the binary cannot be executed.
func (n *NginxBinaryType) getNginxInfoFrom(ngxExe string) *nginxInfo {
	if ngxExe == "" {
		return &nginxInfo{}
	}
	// A "(deleted)" marker in the path indicates the binary was replaced on
	// disk (upgrade); strip it so we execute the freshly installed binary.
	if strings.Contains(ngxExe, execDeleted) {
		log.Infof("nginx was upgraded, using new info")
		ngxExe = sanitizeExecDeletedPath(ngxExe)
	}
	// Serve from the cache while the binary on disk is unchanged.
	if info, ok := n.nginxInfoMap[ngxExe]; ok {
		stat, err := os.Stat(ngxExe)
		if err == nil && stat.ModTime().Equal(info.mtime) {
			return info
		}
	}
	outbuf, err := runCmd(ngxExe, "-V")
	if err != nil {
		log.Errorf("nginx -V failed (%s): %v", outbuf.String(), err)
		return &nginxInfo{}
	}

	info := n.getNginxInfoFromBuffer(ngxExe, outbuf)
	n.nginxInfoMap[ngxExe] = info
	return info
}
+
const (
	execDeleted = "(deleted)"
)

// sanitizeExecDeletedPath strips the "(deleted)" marker (appended to an exe
// path when the binary was replaced on disk) and returns the trimmed
// executable path.
func sanitizeExecDeletedPath(exe string) string {
	if markerAt := strings.Index(exe, execDeleted); markerAt != -1 {
		exe = exe[:markerAt]
	}
	return strings.TrimSpace(exe)
}
+
// getNginxInfoFromBuffer parses `nginx -V` output from buffer into a
// nginxInfo: version (and Plus release), configure arguments, build source,
// SSL details, and the derived prefix/conf/log paths. The binary's mtime is
// recorded so the caller can cache the result.
func (n *NginxBinaryType) getNginxInfoFromBuffer(exePath string, buffer *bytes.Buffer) *nginxInfo {
	info := &nginxInfo{}
	scanner := bufio.NewScanner(buffer)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case strings.HasPrefix(line, "nginx version"):
			info.version, info.plusver = parseNginxVersion(line)
		case strings.HasPrefix(line, "configure arguments"):
			info.cfgf, info.configureArgs = parseConfigureArguments(line)
		case strings.HasPrefix(line, "built by"):
			info.source = line
		case strings.HasPrefix(line, "built with"):
			// Keep at most three space-separated SSL fields from the
			// "built with ..." line.
			l := strings.ReplaceAll(line, "built with ", "")
			sslInfo := strings.SplitN(l, " ", 3)
			for i := range sslInfo {
				sslInfo[i] = strings.TrimSpace(sslInfo[i])
			}
			info.ssl = sslInfo
		}
	}

	if info.cfgf["modules-path"] != nil {
		info.modulesPath = info.cfgf["modules-path"].(string)
		if info.modulesPath != "" {
			// Best effort: a module scan failure leaves loadableModules nil.
			info.loadableModules, _ = n.parseModulePath(info.modulesPath)
		}
	}

	if info.cfgf["prefix"] == nil {
		info.prefix = defaultNginxOssPrefix
	} else {
		info.prefix = info.cfgf["prefix"].(string)
	}

	// conf path default value but -c overrides it elsewhere
	if info.cfgf["conf-path"] != nil {
		info.confPath = info.cfgf["conf-path"].(string)
	} else {
		// if conf-path is not specified, assume nginx is built from source and that there is a config file in the config directory
		info.confPath = path.Join(info.prefix, "/conf/nginx.conf")
	}

	if info.cfgf["http-log-path"] != nil {
		info.logPath = info.cfgf["http-log-path"].(string)
	}

	if info.cfgf["error-log-path"] != nil {
		info.errorPath = info.cfgf["error-log-path"].(string)
	}
	// Record the binary mtime for the caller's cache-invalidation check.
	stat, err := os.Stat(exePath)
	if err == nil {
		info.mtime = stat.ModTime()
	}
	return info
}
+
+func (n *NginxBinaryType) parseModulePath(dir string) ([]string, error) {
+ result, err := n.env.ReadDirectory(dir, ".so")
+ if err != nil {
+ log.Errorf("Unable to parse module path %v", err)
+ return nil, err
+ }
+ return result, nil
+}
+
+func (n *NginxBinaryType) UpdateLogs(existingLogs map[string]string, newLogs map[string]string) bool {
+ logUpdated := false
+
+ for logFile, logFormat := range newLogs {
+ if !(strings.HasPrefix(logFile, "syslog:") || n.SkipLog(logFile)) {
+ if _, found := existingLogs[logFile]; !found || existingLogs[logFile] != logFormat {
+ logUpdated = true
+ }
+ existingLogs[logFile] = logFormat
+ }
+ }
+
+ // delete old logs
+ for logFile := range existingLogs {
+ if _, found := newLogs[logFile]; !found {
+ delete(existingLogs, logFile)
+ logUpdated = true
+ }
+ }
+
+ return logUpdated
+}
+
+func parseNginxVersion(line string) (version, plusVersion string) {
+ matches := re.FindStringSubmatch(line)
+ plusmatches := plusre.FindStringSubmatch(line)
+
+ if len(plusmatches) > 0 {
+ subNames := plusre.SubexpNames()
+ for i, v := range plusmatches {
+ switch subNames[i] {
+ case "plus":
+ plusVersion = v
+ case "version":
+ version = v
+ }
+ }
+ return version, plusVersion
+ }
+
+ if len(matches) > 0 {
+ for i, key := range re.SubexpNames() {
+ val := matches[i]
+ if key == "version" {
+ version = val
+ }
+ }
+ }
+
+ return version, plusVersion
+}
+
+func buildSsl(ssl []string, source string) *proto.NginxSslMetaData {
+ var nginxSslType proto.NginxSslMetaData_NginxSslType
+ if strings.HasPrefix(source, "built") {
+ nginxSslType = proto.NginxSslMetaData_BUILT
+ } else {
+ nginxSslType = proto.NginxSslMetaData_RUN
+ }
+
+ return &proto.NginxSslMetaData{
+ SslType: nginxSslType,
+ Details: ssl,
+ }
+}
+
+func buildPlus(plusver string) *proto.NginxPlusMetaData {
+ plus := false
+ if plusver != "" {
+ plus = true
+ }
+ return &proto.NginxPlusMetaData{
+ Enabled: plus,
+ Release: plusver,
+ }
+}
+
+// runtimeFromConfigure parse and return the runtime modules from `nginx -V` configured args
+// these are usually in the form of "with-X_module", so we just look for "with" prefix, and "module" suffix.
+func runtimeFromConfigure(configure []string) []string {
+ pkgs := make([]string, 0)
+ for _, arg := range configure {
+ if i := strings.Index(arg, withWithPrefix); i > -1 && strings.HasSuffix(arg, withModuleSuffix) {
+ pkgs = append(pkgs, arg[i+len(withWithPrefix):])
+ }
+ }
+
+ return pkgs
+}
+
+// AccessLogs returns a list of access logs in the config
+func AccessLogs(p *proto.NginxConfig) map[string]string {
+ var found = make(map[string]string)
+ for _, accessLog := range p.GetAccessLogs().GetAccessLog() {
+ // check if the access log is readable or not
+ if accessLog.GetReadable() && accessLog.GetName() != "off" {
+ name := strings.Split(accessLog.GetName(), " ")[0]
+ format := accessLog.GetFormat()
+ found[name] = format
+ } else {
+ log.Warnf("NGINX Access log %s is not readable or is disabled. Please make it readable and enabled in order for NGINX metrics to be collected.", accessLog.GetName())
+ }
+ }
+
+ return found
+}
+
+// ErrorLogs returns a list of error logs in the config
+func ErrorLogs(p *proto.NginxConfig) map[string]string {
+ var found = make(map[string]string)
+ for _, errorLog := range p.GetErrorLogs().GetErrorLog() {
+ // check if the error log is readable or not
+ if errorLog.GetReadable() {
+ name := strings.Split(errorLog.GetName(), " ")[0]
+ // In the future, different error log formats will be supported
+ found[name] = ""
+ } else {
+ log.Warnf("NGINX Error log %s is not readable or is disabled. Please make it readable and enabled in order for NGINX metrics to be collected.", errorLog.GetName())
+ }
+ }
+
+ return found
+}
+
+// Returns a list of files that are in the currentDirectoryMap but not in the incomingDirectoryMap
+func getDirectoryMapDiff(currentDirectoryMap []*proto.Directory, incomingDirectoryMap []*proto.Directory) []string {
+ diff := []string{}
+
+ incomingMap := make(map[string]struct{})
+ for _, incomingDirectory := range incomingDirectoryMap {
+ for _, incomingFile := range incomingDirectory.Files {
+ filePath := incomingFile.Name
+ if !filepath.IsAbs(filePath) {
+ filePath = filepath.Join(incomingDirectory.Name, filePath)
+ }
+ incomingMap[filePath] = struct{}{}
+ }
+ }
+
+ for _, currentDirectory := range currentDirectoryMap {
+ for _, currentFile := range currentDirectory.Files {
+ filePath := currentFile.Name
+ if !filepath.IsAbs(filePath) {
+ filePath = filepath.Join(currentDirectory.Name, currentFile.Name)
+ }
+ if _, ok := incomingMap[filePath]; !ok {
+ diff = append(diff, filePath)
+ }
+ }
+ }
+
+ return diff
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/os.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/os.go
new file mode 100644
index 000000000..df43deb48
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/os.go
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "os"
+)
+
+// FileExists determines if the specified file given by the file path exists on the system.
+// If the file does NOT exist on the system the bool will be false and the error will be nil,
+// if the error is not nil then it's possible the file might exist but an error verifying it's
+// existence has occurred.
+func FileExists(filePath string) (bool, error) {
+ _, err := os.Stat(filePath)
+ if os.IsNotExist(err) {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+// FilesExists determines if the specified set of files exists on the system. If any of the files
+// do NOT exist on the system the bool will be false and the error will be nil, if the error is
+// not nil then it's possible the files might exist but an error verifying their existence has
+// occurred.
+func FilesExists(filePaths []string) (bool, error) {
+ for _, filePath := range filePaths {
+ fileExists, err := FileExists(filePath)
+ if !fileExists || err != nil {
+ return false, err
+ }
+ }
+
+ return true, nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/pipe.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/pipe.go
new file mode 100644
index 000000000..3a2984026
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/pipe.go
@@ -0,0 +1,116 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "context"
+ "sync"
+
+ messagebus "github.com/vardius/message-bus"
+)
+
const (
	// MessageQueueSize caps how many messages may be buffered on the pipe's
	// channel before Process blocks.
	MessageQueueSize = 100
	// MaxPlugins is the initial capacity of the plugin slice.
	MaxPlugins = 100
)

// MessagePipeInterface is the contract plugins use to interact with the
// message pipe: plugin registration, message submission, the run loop, and
// access to the pipe's context and plugin list.
type MessagePipeInterface interface {
	Register(int, ...Plugin) error
	Process(...*Message)
	Run()
	Context() context.Context
	GetPlugins() []Plugin
}

// MessagePipe fans messages out to subscribed plugins through an internal
// message bus. mu guards plugins and bus against concurrent Register/Run use.
type MessagePipe struct {
	messageChannel chan *Message
	plugins        []Plugin
	ctx            context.Context
	cancel         context.CancelFunc
	mu             sync.Mutex
	bus            messagebus.MessageBus
}
+
+func NewMessagePipe(ctx context.Context) *MessagePipe {
+ pipeContext, pipeCancel := context.WithCancel(ctx)
+ return &MessagePipe{
+ messageChannel: make(chan *Message, MessageQueueSize),
+ plugins: make([]Plugin, 0, MaxPlugins),
+ ctx: pipeContext,
+ cancel: pipeCancel,
+ mu: sync.Mutex{},
+ }
+}
+
+func (p *MessagePipe) Register(size int, plugins ...Plugin) error {
+ p.mu.Lock()
+
+ p.plugins = append(p.plugins, plugins...)
+ p.bus = messagebus.New(size)
+
+ for _, plugin := range p.plugins {
+ for _, subscription := range plugin.Subscriptions() {
+ err := p.bus.Subscribe(subscription, plugin.Process)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ p.mu.Unlock()
+ return nil
+}
+
+func (p *MessagePipe) Process(messages ...*Message) {
+ for _, m := range messages {
+ select {
+ case p.messageChannel <- m:
+ case <-p.ctx.Done():
+ return
+ }
+ }
+}
+
// Run initializes all registered plugins and then pumps messages from the
// channel onto the bus until the pipe's context is cancelled, at which point
// every plugin is closed and the channel is shut down.
func (p *MessagePipe) Run() {
	p.initPlugins()

	for {
		select {
		case <-p.ctx.Done():

			for _, r := range p.plugins {
				r.Close()
			}
			close(p.messageChannel)

			return
		case m := <-p.messageChannel:
			// Publish under the mutex so the bus cannot be swapped by a
			// concurrent Register call mid-publish.
			p.mu.Lock()
			p.bus.Publish(m.Topic(), m)
			p.mu.Unlock()
		}
	}
}
+
// Context returns the pipe's cancellable context.
func (p *MessagePipe) Context() context.Context {
	return p.ctx
}

// Cancel returns the function that cancels the pipe's context (and thereby
// stops Run).
func (p *MessagePipe) Cancel() context.CancelFunc {
	return p.cancel
}

// GetPlugins returns the currently registered plugins.
func (p *MessagePipe) GetPlugins() []Plugin {
	return p.plugins
}

// initPlugins hands each plugin a reference to the pipe before Run starts
// dispatching messages.
func (p *MessagePipe) initPlugins() {
	for _, r := range p.plugins {
		r.Init(p)
	}
}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/plugin.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/plugin.go
new file mode 100644
index 000000000..f2f5af37d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/plugin.go
@@ -0,0 +1,16 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
// Plugin is a unit of agent functionality attached to a MessagePipe: it is
// initialized with the pipe, receives messages for its Subscriptions()
// topics via Process, and is Closed when the pipe shuts down.
type Plugin interface {
	Init(MessagePipeInterface)
	Close()
	Process(*Message)
	Info() *Info
	Subscriptions() []string
}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/process.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/process.go
new file mode 100644
index 000000000..e3706ee98
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/process.go
@@ -0,0 +1,55 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
+import (
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/process"
+)
+
+// CheckForProcesses takes in a slice of strings that represents the process
+// names to check for then returns a slice of strings of the processes that
+// were checked for and NOT found.
+func CheckForProcesses(processesToCheck []string) ([]string, error) {
+ runningProcesses, err := process.Processes()
+ if err != nil {
+ return nil, err
+ }
+
+ processCheckCopy := make([]string, len(processesToCheck))
+ copy(processCheckCopy, processesToCheck)
+
+ for _, process := range runningProcesses {
+ if len(processCheckCopy) == 0 {
+ return processCheckCopy, nil
+ }
+
+ procName, err := process.Name()
+ if err != nil {
+ continue
+ }
+
+ procCmd, err := process.CmdlineSlice()
+ if err != nil {
+ continue
+ }
+
+ if found, idx := SliceContainsString(processCheckCopy, procName); found {
+ processCheckCopy = append(processCheckCopy[:idx], processCheckCopy[idx+1:]...)
+ } else if len(procCmd) > 0 {
+ splitCmd := strings.Split(procCmd[0], "/")
+ procName = splitCmd[len(splitCmd)-1]
+ if found, idx := SliceContainsString(processCheckCopy, procName); found {
+ processCheckCopy = append(processCheckCopy[:idx], processCheckCopy[idx+1:]...)
+ }
+ }
+ }
+
+ return processCheckCopy, nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/slice.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/slice.go
new file mode 100644
index 000000000..95d3544ae
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/slice.go
@@ -0,0 +1,21 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
// SliceContainsString reports whether toFind occurs in slice, returning a
// found flag together with the index of the first occurrence (-1 when the
// string is absent).
func SliceContainsString(slice []string, toFind string) (bool, int) {
	for idx := range slice {
		if slice[idx] == toFind {
			return true, idx
		}
	}
	return false, -1
}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/src/core/topics.go b/test/integration/vendor/github.com/nginx/agent/v2/src/core/topics.go
new file mode 100644
index 000000000..67a6e1ff0
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/src/core/topics.go
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) F5, Inc.
+ *
+ * This source code is licensed under the Apache License, Version 2.0 license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+package core
+
// Topic names published on the agent message pipe. Related topics share a
// prefix constant (registration., comms., logger.).
const (
	UNKNOWN = "unknown"
	// Registration lifecycle topics.
	RegistrationPrefix                   = "registration."
	RegistrationCompletedTopic           = RegistrationPrefix + "completed"
	RegisterWithDataplaneSoftwareDetails = RegistrationPrefix + "with.dataplane.software.details"
	// NGINX configuration and process lifecycle topics.
	CommNginxConfig                = "nginx.config"
	NginxConfigUpload              = "nginx.config.upload"
	NginxReload                    = "nginx.reload"
	NginxReloadComplete            = "nginx.reload.complete"
	NginxStart                     = "nginx.start"
	NginxStop                      = "nginx.stop"
	NginxPluginConfigured          = "nginx.plugin.config"
	NginxStatusAPIUpdate           = "nginx.status.api.update"
	NginxInstancesFound            = "nginx.instances.found"
	NginxMasterProcCreated         = "nginx.master.created"
	NginxMasterProcKilled          = "nginx.master.killed"
	NginxWorkerProcCreated         = "nginx.worker.created"
	NginxWorkerProcKilled          = "nginx.worker.killed"
	NginxDetailProcUpdate          = "nginx.proc.update"
	NginxConfigValidationPending   = "nginx.config.validation.pending"
	NginxConfigValidationFailed    = "nginx.config.validation.failed"
	NginxConfigValidationSucceeded = "nginx.config.validation.succeeded"
	NginxConfigApplyFailed         = "nginx.config.apply.failed"
	NginxConfigApplySucceeded      = "nginx.config.apply.succeeded"
	// Control-plane communication topics.
	CommPrefix   = "comms."
	CommStatus   = CommPrefix + "status"
	CommMetrics  = CommPrefix + "metrics"
	CommRegister = CommPrefix + "register"
	CommResponse = CommPrefix + "response"
	// Agent lifecycle and configuration topics.
	AgentStarted          = "agent.started"
	AgentConnected        = "agent.connected"
	AgentConfig           = "agent.config"
	AgentConfigChanged    = "agent.config.changed"
	AgentCollectorsUpdate = "agent.collectors.update"
	MetricReport          = "metrics.report"
	// Logger reconfiguration topics.
	LoggerPrefix = "logger."
	LoggerLevel  = LoggerPrefix + "level"
	LoggerPath   = LoggerPrefix + "path"
	// Dataplane and miscellaneous topics.
	DataplaneChanged                = "dataplane.changed"
	DataplaneFilesChanged           = "dataplane.fileschanged"
	Events                          = "events"
	FileWatcherEnabled              = "file.watcher.enabled"
	ConfigRollbackResponse          = "config.rollback.response"
	DataplaneSoftwareDetailsUpdated = "dataplane.software.details.updated"
	EnableExtension                 = "enable.extension"
	AgentAPIConfigApplyResponse     = "agent.api.config.apply.response"
)
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/agent_config.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/agent_config.go
new file mode 100644
index 000000000..58fa133dd
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/agent_config.go
@@ -0,0 +1,146 @@
+package utils
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+
+ "github.com/spf13/viper"
+
+ agent_config "github.com/nginx/agent/sdk/v2/agent/config"
+ "github.com/nginx/agent/v2/src/core/config"
+
+ sysutils "github.com/nginx/agent/v2/test/utils/system"
+ log "github.com/sirupsen/logrus"
+)
+
const (
	// Names of the throwaway conf copies created in the working directory by
	// CreateTestAgentConfigEnv.
	tempAgentConfFileName        = "temp-nginx-agent.conf"
	tempDynamicAgentConfFileName = "temp-agent-dynamic.conf"
	// Display-name key and default used by the test configs.
	DisplayNameKey         = "display_name"
	DefaultTestDisplayName = "nginx-agent-repo"
)

var (
	// Get variables for parent directories
	_, absFilePath, _, _ = runtime.Caller(0)
	absUtilsDirPath      = filepath.Dir(absFilePath)
	absTestDirPath       = filepath.Dir(absUtilsDirPath)
	// delFunc removes the temp conf copies; it is a no-op until setupRoutine
	// replaces it once the copies exist.
	delFunc = func() {}

	// Absolute paths to test files
	testAgentConfPath        = absTestDirPath + "/testdata/configs/nginx-agent.conf"
	testAgentDynamicConfPath = absTestDirPath + "/testdata/configs/agent-dynamic.conf"
)
+
// GetMockAgentConfig returns a fixed agent configuration for unit tests:
// canned client id/tags, aggregated metrics mode, and short intervals.
func GetMockAgentConfig() *config.Config {
	return &config.Config{
		ClientID:   "12345",
		Tags:       InitialConfTags,
		ConfigDirs: "/testDirs",
		AgentMetrics: config.AgentMetrics{
			BulkSize:           1,
			ReportInterval:     5,
			CollectionInterval: 1,
			Mode:               "aggregated",
		},
	}
}

// GetTestAgentConfigPath gets the absolute path to the test agent config
func GetTestAgentConfigPath() string {
	return testAgentConfPath
}
+
+// CreateTestAgentConfigEnv creates an agent config file named "temp-nginx-agent.conf"
+// and a dynamic config named "temp-agent-dynamic.conf" meant for testing in the current
+// working directory. Additionally, a Viper config is created that has its variables set
+// based off the created conf files ("temp-nginx-agent.conf" and "temp-agent-dynamic.conf").
+// It returns the name of the config ("nginx-agent.conf"), the name of the of the dynamic
+// config ("temp-agent-dynamic.conf"), and a function to call that deletes the both of the
+// files that were created.
+func CreateTestAgentConfigEnv() (string, string, func(), error) {
+ wg := &sync.WaitGroup{}
+ err := make(chan error, 1)
+
+ wg.Add(1)
+ go func() {
+ err <- setupRoutine(wg)
+ }()
+ wg.Wait()
+
+ if err := <-err; err != nil {
+ return "", "", nil, err
+ }
+
+ return tempAgentConfFileName, tempDynamicAgentConfFileName, delFunc, nil
+}
+
+func setupRoutine(wg *sync.WaitGroup) error {
+ // Setup Viper and Config variables
+ // Register the temp agent config that was created
+ // Set viper config properties from created test config
+
+ // Create a temp agent conf and dynamic agent conf in the current directory
+ // for calling tests to utilize
+ confDeleteFunc, err := sysutils.CopyFile(testAgentConfPath, tempAgentConfFileName)
+ if err != nil {
+ err = confDeleteFunc()
+ if err != nil {
+ log.Errorf("error occurred deleting configuration: %v", err)
+ }
+ return fmt.Errorf("error copying file %s to destination %s", testAgentConfPath, tempAgentConfFileName)
+ }
+
+ dynamicConfDeleteFunc, err := sysutils.CopyFile(testAgentDynamicConfPath, tempDynamicAgentConfFileName)
+ if err != nil {
+ delFunc()
+ return fmt.Errorf("error copying file %s to destination %s", testAgentConfPath, tempAgentConfFileName)
+ }
+
+ // Create the delete func that is responsible for cleaning up both temp files
+ // that are created
+ delFunc = func() {
+ err = confDeleteFunc()
+ if err != nil {
+ log.Errorf("error occurred deleting configuration: %v", err)
+ }
+ err = dynamicConfDeleteFunc()
+ if err != nil {
+ log.Errorf("error occurred deleting dynamic configuration: %v", err)
+ }
+ }
+
+ // Set the Viper values and variables
+ viper.Reset()
+ os.Clearenv()
+ config.ROOT_COMMAND.ResetFlags()
+ config.ROOT_COMMAND.ResetCommands()
+ config.Viper = viper.NewWithOptions(viper.KeyDelimiter(agent_config.KeyDelimiter))
+ config.SetDefaults()
+ config.RegisterFlags()
+
+ // Get current directory to allow
+ curDir, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+
+ cfg, err := config.RegisterConfigFile(fmt.Sprintf("%s/%s", curDir, tempDynamicAgentConfFileName), tempAgentConfFileName, []string{"."}...)
+ if err != nil {
+ delFunc()
+ return fmt.Errorf("failed to register config file (%s) - %v", tempAgentConfFileName, err)
+ }
+
+ err = config.LoadPropertiesFromFile(cfg)
+ if err != nil {
+ delFunc()
+ return fmt.Errorf("failed to load properties from config file (%s) - %v", tempAgentConfFileName, err)
+ }
+
+ config.Viper.Set(config.ConfigPathKey, cfg)
+ wg.Done()
+ return nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/command_client.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/command_client.go
new file mode 100644
index 000000000..12934de57
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/command_client.go
@@ -0,0 +1,126 @@
+package utils
+
+import (
+ "context"
+ "time"
+
+ "github.com/nginx/agent/sdk/v2/client"
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/stretchr/testify/mock"
+ "google.golang.org/grpc"
+)
+
+type MockCommandClient struct {
+ mock.Mock
+}
+
+func GetMockCommandClient(config *proto.NginxConfig) *MockCommandClient {
+ client := NewMockCommandClient()
+ client.On("Download", mock.Anything).Return(config, nil)
+ client.On("Upload", mock.Anything, mock.Anything).Return(nil)
+ return client
+}
+
+func NewMockCommandClient() *MockCommandClient {
+ return &MockCommandClient{}
+}
+
+var _ client.Commander = NewMockCommandClient()
+
+func (m *MockCommandClient) Connect(ctx context.Context) error {
+ args := m.Called(ctx)
+
+ return args.Error(0)
+}
+
+func (m *MockCommandClient) Close() error {
+ args := m.Called()
+
+ return args.Error(0)
+}
+
+func (m *MockCommandClient) Server() string {
+ args := m.Called()
+
+ return args.String(0)
+}
+
+func (m *MockCommandClient) WithServer(s string) client.Client {
+ m.Called(s)
+
+ return m
+}
+
+func (m *MockCommandClient) DialOptions() []grpc.DialOption {
+ args := m.Called()
+
+ return args.Get(0).([]grpc.DialOption)
+}
+
+func (m *MockCommandClient) WithDialOptions(options ...grpc.DialOption) client.Client {
+ m.Called(options)
+
+ return m
+}
+
+func (m *MockCommandClient) ChunksSize() int {
+ args := m.Called()
+
+ return args.Int(0)
+}
+
+func (m *MockCommandClient) WithChunkSize(i int) client.Client {
+ m.Called(i)
+
+ return m
+}
+
+func (m *MockCommandClient) WithInterceptor(interceptor interceptors.Interceptor) client.Client {
+ m.Called(interceptor)
+
+ return m
+}
+
+func (m *MockCommandClient) WithClientInterceptor(interceptor interceptors.ClientInterceptor) client.Client {
+ m.Called(interceptor)
+
+ return m
+}
+
+func (m *MockCommandClient) WithConnWaitDuration(d time.Duration) client.Client {
+ m.Called(d)
+
+ return m
+}
+
+func (m *MockCommandClient) WithBackoffSettings(backoffSettings client.BackoffSettings) client.Client {
+ m.Called(backoffSettings)
+
+ return m
+}
+
+func (m *MockCommandClient) Send(ctx context.Context, message client.Message) error {
+ m.Called(ctx, message)
+
+ return nil
+}
+
+func (m *MockCommandClient) Recv() <-chan client.Message {
+ args := m.Called()
+
+ return args.Get(0).(<-chan client.Message)
+}
+
+func (m *MockCommandClient) Download(_ context.Context, meta *proto.Metadata) (*proto.NginxConfig, error) {
+ args := m.Called(meta)
+ cfg := args.Get(0).(*proto.NginxConfig)
+ err := args.Error(1)
+
+ return cfg, err
+}
+
+func (m *MockCommandClient) Upload(_ context.Context, cfg *proto.NginxConfig, messageId string) error {
+ args := m.Called(cfg, messageId)
+ return args.Error(0)
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/defaults.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/defaults.go
new file mode 100644
index 000000000..8bce59bb6
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/defaults.go
@@ -0,0 +1,12 @@
+package utils
+
+const (
+ InitialConfTag1 = "locally-tagged"
+ InitialConfTag2 = "tagged-locally"
+)
+
+var (
+ // These initial conf tags come from the values located in
+ // ../testdata/configs/agent-dynamic.conf
+ InitialConfTags = []string{InitialConfTag1, InitialConfTag2}
+)
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/environment.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/environment.go
new file mode 100644
index 000000000..b558011ff
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/environment.go
@@ -0,0 +1,121 @@
+package utils
+
+import (
+ "os"
+
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/nginx/agent/v2/src/core"
+ "github.com/stretchr/testify/mock"
+)
+
+type MockEnvironment struct {
+ mock.Mock
+}
+
+func GetProcesses() []core.Process {
+ return []core.Process{
+ {Pid: 1, Name: "12345", IsMaster: true},
+ {Pid: 2, ParentPid: 1, Name: "worker-1", IsMaster: false},
+ {Pid: 3, ParentPid: 1, Name: "worker-2", IsMaster: false},
+ }
+}
+
+func GetMockEnv() *MockEnvironment {
+ env := NewMockEnvironment()
+ env.On("NewHostInfo", mock.Anything, mock.Anything, mock.Anything).Return(&proto.HostInfo{
+ Hostname: "test-host",
+ })
+ return env
+}
+
+func GetMockEnvWithProcess() *MockEnvironment {
+ env := NewMockEnvironment()
+ env.On("Processes", mock.Anything).Return(GetProcesses())
+ return env
+}
+
+func GetMockEnvWithHostAndProcess() *MockEnvironment {
+ env := GetMockEnv()
+ env.On("Processes", mock.Anything).Return(GetProcesses())
+ return env
+}
+
+func NewMockEnvironment() *MockEnvironment {
+ return &MockEnvironment{}
+}
+
+var _ core.Environment = NewMockEnvironment()
+
+func (m *MockEnvironment) NewHostInfo(agentVersion string, tags *[]string, configDirs string, clearCache bool) *proto.HostInfo {
+ args := m.Called(agentVersion, tags)
+ returned, ok := args.Get(0).(*proto.HostInfo)
+ if !ok {
+ return &proto.HostInfo{
+ Agent: agentVersion,
+ Boot: 0,
+ Hostname: "test-host",
+ DisplayName: "",
+ OsType: "",
+ Uuid: "",
+ Uname: "",
+ Partitons: []*proto.DiskPartition{},
+ Network: &proto.Network{},
+ Processor: []*proto.CpuInfo{},
+ Release: &proto.ReleaseInfo{},
+ }
+ }
+ return returned
+}
+
+func (m *MockEnvironment) GetHostname() string {
+ return "test-host"
+}
+
+func (m *MockEnvironment) GetSystemUUID() string {
+ return "12345678"
+}
+
+func (m *MockEnvironment) ReadDirectory(dir string, ext string) ([]string, error) {
+ m.Called(dir, ext)
+ return []string{}, nil
+}
+
+func (m *MockEnvironment) ReadFile(file string) ([]byte, error) {
+ m.Called(file)
+ return []byte{}, nil
+}
+
+func (m *MockEnvironment) Processes() (result []core.Process) {
+ ret := m.Called()
+ return ret.Get(0).([]core.Process)
+}
+
+func (m *MockEnvironment) WriteFiles(backup core.ConfigApplyMarker, files []*proto.File, prefix string, allowedDirs map[string]struct{}) error {
+ m.Called(backup, files, prefix, allowedDirs)
+ return nil
+}
+
+func (m *MockEnvironment) FileStat(path string) (os.FileInfo, error) {
+ m.Called(path)
+ return os.Stat(path)
+}
+
+func (m *MockEnvironment) DiskDevices() ([]string, error) {
+ ret := m.Called()
+ return ret.Get(0).([]string), ret.Error(1)
+}
+
+func (m *MockEnvironment) GetNetOverflow() (float64, error) {
+ m.Called()
+ return 0.0, nil
+}
+
+func (m *MockEnvironment) GetContainerID() (string, error) {
+ m.Called()
+ return "12345", nil
+}
+
+func (m *MockEnvironment) IsContainer() bool {
+ ret := m.Called()
+ return ret.Get(0).(bool)
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/metrics_report_client.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/metrics_report_client.go
new file mode 100644
index 000000000..f47cfb5e7
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/metrics_report_client.go
@@ -0,0 +1,87 @@
+package utils
+
+import (
+ "context"
+ "time"
+
+ "github.com/nginx/agent/sdk/v2/client"
+ "github.com/nginx/agent/sdk/v2/interceptors"
+ "github.com/stretchr/testify/mock"
+ "google.golang.org/grpc"
+)
+
+type MockMetricsReportClient struct {
+ mock.Mock
+}
+
+func NewMockMetricsReportClient() *MockMetricsReportClient {
+ return &MockMetricsReportClient{}
+}
+
+var _ client.MetricReporter = NewMockMetricsReportClient()
+
+func (m *MockMetricsReportClient) Server() string {
+ args := m.Called()
+
+ return args.String(0)
+}
+
+func (m *MockMetricsReportClient) WithServer(s string) client.Client {
+ m.Called(s)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) DialOptions() []grpc.DialOption {
+ args := m.Called()
+
+ return args.Get(0).([]grpc.DialOption)
+}
+
+func (m *MockMetricsReportClient) WithDialOptions(options ...grpc.DialOption) client.Client {
+ m.Called(options)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) WithInterceptor(interceptor interceptors.Interceptor) client.Client {
+ m.Called(interceptor)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) WithClientInterceptor(interceptor interceptors.ClientInterceptor) client.Client {
+ m.Called(interceptor)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) WithConnWaitDuration(d time.Duration) client.Client {
+ m.Called(d)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) WithBackoffSettings(backoffSettings client.BackoffSettings) client.Client {
+ m.Called(backoffSettings)
+
+ return m
+}
+
+func (m *MockMetricsReportClient) Connect(ctx context.Context) error {
+ args := m.Called(ctx)
+
+ return args.Error(0)
+}
+
+func (m *MockMetricsReportClient) Send(ctx context.Context, message client.Message) error {
+ args := m.Called(ctx, message)
+
+ return args.Error(0)
+}
+
+func (m *MockMetricsReportClient) Close() error {
+ args := m.Called()
+
+ return args.Error(0)
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/nginx.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/nginx.go
new file mode 100644
index 000000000..2b614ef7a
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/nginx.go
@@ -0,0 +1,171 @@
+package utils
+
+import (
+ "github.com/nginx/agent/sdk/v2"
+ "github.com/nginx/agent/sdk/v2/proto"
+ "github.com/nginx/agent/v2/src/core"
+ "github.com/stretchr/testify/mock"
+)
+
+type MockNginxBinary struct {
+ mock.Mock
+}
+
+func GetDetailsMap() map[string]*proto.NginxDetails {
+ return map[string]*proto.NginxDetails{
+ "12345": {
+ NginxId: "12345",
+ Version: "1.2.1",
+ ConfPath: "/var/conf",
+ ProcessId: "123",
+ ProcessPath: "/path/to/nginx",
+ StartTime: 1564894894,
+ BuiltFromSource: false,
+ LoadableModules: []string{},
+ RuntimeModules: []string{},
+ Plus: &proto.NginxPlusMetaData{
+ Enabled: true,
+ Release: "1.2.1",
+ },
+ Ssl: &proto.NginxSslMetaData{},
+ StatusUrl: "",
+ ConfigureArgs: []string{},
+ },
+ }
+}
+
+func GetMockNginxBinary() *MockNginxBinary {
+ binary := NewMockNginxBinary()
+
+ binary.On("GetNginxDetailsMapFromProcesses", mock.Anything).Return(GetDetailsMap())
+ binary.On("GetNginxIDForProcess", mock.Anything).Return(GetDetailsMap())
+ binary.On("GetNginxDetailsFromProcess", mock.Anything).Return(GetDetailsMap()["12345"])
+
+ return binary
+}
+
+func (m *MockNginxBinary) GetNginxDetailsByID(nginxID string) *proto.NginxDetails {
+ args := m.Called(nginxID)
+ return args.Get(0).(*proto.NginxDetails)
+}
+
+func (m *MockNginxBinary) GetChildProcesses() map[string][]*proto.NginxDetails {
+ args := m.Called()
+ return args.Get(0).(map[string][]*proto.NginxDetails)
+}
+
+func (m *MockNginxBinary) WriteConfig(config *proto.NginxConfig) (*sdk.ConfigApply, error) {
+ args := m.Called(config)
+ confApply := args.Get(0).(*sdk.ConfigApply)
+
+ return confApply, args.Error(1)
+}
+
+func (m *MockNginxBinary) ReadConfig(path, nginxId, systemId string) (*proto.NginxConfig, error) {
+ args := m.Called(path, nginxId, systemId)
+ config := args.Get(0).(*proto.NginxConfig)
+ err := args.Error(1)
+
+ return config, err
+}
+
+func (m *MockNginxBinary) Start(nginxId, bin string) error {
+ m.Called(nginxId, bin)
+ return nil
+}
+
+func (m *MockNginxBinary) Stop(processId, bin string) error {
+ m.Called(processId, bin)
+ return nil
+}
+
+func (m *MockNginxBinary) Reload(processId, bin string) error {
+ m.Called(processId, bin)
+ return nil
+}
+
+func (m *MockNginxBinary) ValidateConfig(processId, bin, configLocation string, config *proto.NginxConfig, configApply *sdk.ConfigApply) error {
+ args := m.Called(processId, bin, configLocation, config, configApply)
+ return args.Error(0)
+}
+
+func (m *MockNginxBinary) GetNginxDetailsMapFromProcesses(nginxProcesses []core.Process) map[string]*proto.NginxDetails {
+ args := m.Called(nginxProcesses)
+ return args.Get(0).(map[string]*proto.NginxDetails)
+}
+
+func (m *MockNginxBinary) UpdateNginxDetailsFromProcesses(nginxProcesses []core.Process) {
+ m.Called(nginxProcesses)
+}
+
+func (m *MockNginxBinary) GetNginxIDForProcess(nginxProcess core.Process) string {
+ args := m.Called(nginxProcess)
+ return args.String(0)
+}
+
+func (m *MockNginxBinary) GetNginxDetailsFromProcess(nginxProcess core.Process) *proto.NginxDetails {
+ args := m.Called(nginxProcess)
+ return args.Get(0).(*proto.NginxDetails)
+}
+
+func (m *MockNginxBinary) UpdateLogs(existing map[string]string, newLogs map[string]string) bool {
+ args := m.Called(existing, newLogs)
+ return args.Bool(0)
+}
+
+func (m *MockNginxBinary) GetAccessLogs() map[string]string {
+ args := m.Called()
+ return args.Get(0).(map[string]string)
+}
+
+func (m *MockNginxBinary) GetErrorLogs() map[string]string {
+ args := m.Called()
+ return args.Get(0).(map[string]string)
+}
+
+func NewMockNginxBinary() *MockNginxBinary {
+ return &MockNginxBinary{}
+}
+
+var _ core.NginxBinary = NewMockNginxBinary()
+
+func GetDetailsNginxOssConfig() string {
+ return `
+ user nginx;
+ worker_processes auto;
+
+ error_log /usr/local/nginx/error.log notice;
+ pid /var/run/nginx.pid;
+
+ events {
+ worker_connections 1024;
+ }
+
+
+ http {
+ default_type application/octet-stream;
+
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+
+ access_log /usr/local/nginx/access.log main;
+
+ sendfile on;
+ #tcp_nopush on;
+
+ keepalive_timeout 65;
+
+ #gzip on;
+ server {
+ listen 8080;
+ server_name localhost;
+ location /api {
+ stub_status;
+ allow 127.0.0.1;
+ deny all;
+ }
+ }
+ }
+ `
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/process.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/process.go
new file mode 100644
index 000000000..dfd405618
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/process.go
@@ -0,0 +1,31 @@
+package utils
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "time"
+)
+
+// StartFakeProcesses creates a fake process for each of the string names and
+// each fake process lasts for fakeProcsDuration of time (seconds), the
+// function that is returned can be run to kill all the fake processes that
+// were created.
+func StartFakeProcesses(names []string, fakeProcsDuration string) func() {
+ pList := make([]*os.Process, 0)
+ for _, name := range names {
+ pCmd := exec.Command("bash", "-c", fmt.Sprintf("exec -a %s sleep %s", name, fakeProcsDuration))
+ _ = pCmd.Start()
+
+ // Arbitrary sleep to ensure process has time to come up
+ time.Sleep(time.Millisecond * 150)
+
+ pList = append(pList, pCmd.Process)
+ }
+
+ return func() {
+ for _, p := range pList {
+ _ = p.Kill()
+ }
+ }
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/symbols.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/symbols.go
new file mode 100644
index 000000000..1a06f3d0d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/symbols.go
@@ -0,0 +1,40 @@
+package utils
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+)
+
+const (
+ symbolsFile = "test/symbols.json"
+ symbolsFileLocal = "test/symbols-local.json"
+)
+
+type Symbols struct {
+ Host string `json:"host"`
+ Port int64 `json:"port"`
+ TLS bool `json:"tls"`
+}
+
+func LoadSymbolsFile() (*Symbols, error) {
+ var symFile string
+
+ _, err := os.Stat(symbolsFile)
+ if err == nil {
+ symFile = symbolsFile
+ } else {
+ symFile = symbolsFileLocal
+ }
+
+ content, err := ioutil.ReadFile(symFile)
+ if err != nil {
+ return nil, err
+ }
+
+ var sym *Symbols
+ if err = json.Unmarshal(content, &sym); err != nil {
+ return nil, err
+ }
+ return sym, nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/system/system.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/system/system.go
new file mode 100644
index 000000000..eb7799ff8
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/system/system.go
@@ -0,0 +1,41 @@
+package sysutils
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+func CopyFile(src, dest string) (func() error, error) {
+ in, err := os.Open(src)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating test agent config (%s) when opening file - %v", src, err)
+ }
+ defer in.Close()
+
+ out, err := os.Create(dest)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating test agent config (%s) when creating file - %v", dest, err)
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating test agent config (%s) when copying over file - %v", src, err)
+ }
+
+ err = out.Close()
+ if err != nil {
+ return nil, fmt.Errorf("failed creating test agent config (%s) when closing file - %v", dest, err)
+ }
+
+ deleteFunc := func() error {
+ err := os.Remove(dest)
+ if err != nil {
+ return fmt.Errorf("failed to delete test agent config (%s) - %v", dest, err)
+ }
+ return nil
+ }
+
+ return deleteFunc, nil
+}
diff --git a/test/integration/vendor/github.com/nginx/agent/v2/test/utils/tls.go b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/tls.go
new file mode 100644
index 000000000..513788a66
--- /dev/null
+++ b/test/integration/vendor/github.com/nginx/agent/v2/test/utils/tls.go
@@ -0,0 +1,35 @@
+package utils
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+
+ log "github.com/sirupsen/logrus"
+
+ "google.golang.org/grpc/credentials"
+)
+
+func LoadKeyPair() credentials.TransportCredentials {
+ certificate, err := tls.LoadX509KeyPair("certs/server.crt", "certs/server.key")
+ if err != nil {
+ log.Fatalf("Load server certification failed: %v", err)
+ }
+
+ data, err := ioutil.ReadFile("certs/client/ca.crt")
+ if err != nil {
+ log.Fatalf("can't read ca file: %v", err)
+ }
+
+ capool := x509.NewCertPool()
+ if !capool.AppendCertsFromPEM(data) {
+ log.Fatal("can't add ca cert")
+ }
+
+ tlsConfig := &tls.Config{
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ Certificates: []tls.Certificate{certificate},
+ ClientCAs: capool,
+ }
+ return credentials.NewTLS(tlsConfig)
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.dockerignore b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.dockerignore
new file mode 100644
index 000000000..c795b054e
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.dockerignore
@@ -0,0 +1 @@
+build
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitignore b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitignore
new file mode 100644
index 000000000..82c668079
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitignore
@@ -0,0 +1,34 @@
+# Binaries for programs and plugins
+bin
+build
+results
+_tmp
+.DS_store
+
+# Test binary, build with `go test -c`
+*.test
+
+# Python Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+coverage.*
+
+# Kubernetes Generated files - skip generated files, except for vendored files
+!vendor/**/zz_generated.*
+
+# editor and IDE paraphernalia
+.idea
+.vscode
+*.code-workspace
+*.swp
+*.swo
+*~
+*.orig
+*.go-e
+*.iml
+
+
+
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitlab-ci.yml b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitlab-ci.yml
new file mode 100644
index 000000000..e1c34a9e3
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.gitlab-ci.yml
@@ -0,0 +1,35 @@
+include:
+ - project: "f5/nginx/tools/easy-cicd"
+ ref: "master"
+ file: "include/easy-cicd.yml"
+
+variables:
+ GSG_INITIAL_DEVELOPMENT: "true"
+ GSG_RELEASE_BRANCHES: master
+ GSG_PRE_TMPL: '{{env "CI_PIPELINE_ID"}},{{env "CI_COMMIT_REF_SLUG"}}'
+
+
+stages:
+- checks
+
+Linting:
+ stage: checks
+ extends: .lint-go
+ except:
+ - tags
+
+Fossa-check:
+ stage: checks
+ extends: .fossa-go
+ except:
+ - tags
+
+Unit Tests:
+ stage: checks
+ extends: .unit-test-go
+ except:
+ - tags
+ script:
+ - make test
+
+
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.golangci.yml b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.golangci.yml
new file mode 100644
index 000000000..19f288ad6
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/.golangci.yml
@@ -0,0 +1,84 @@
+# GolangCI-Lint settings
+
+# Disable all linters and enable the required ones
+linters:
+ enable-all: true
+ disable:
+ - goimports # handled by `make format`
+ - gofmt # handled by `make format`
+ - wsl # hyper specific to the creator and not configurable enough to be useful - https://github.com/bombsimon/wsl
+ - dupl
+ - gomnd
+ - godox
+ - nlreturn
+ - exhaustivestruct
+ - wrapcheck
+ - thelper
+ - testpackage
+ - nestif
+ - gofumpt
+ - goerr113
+ - errorlint
+ - ifshort
+ - paralleltest
+ - interfacer
+ - golint
+ - maligned
+ - scopelint
+
+
+# Run options
+run:
+ # 10 minute timeout for analysis
+ timeout: 10m
+ modules-download-mode: vendor
+ skip-dirs-use-default: true
+
+# Specific linter settings
+linters-settings:
+ gocyclo:
+ # Minimal code complexity to report
+ min-complexity: 16
+ maligned:
+ # Print struct with more effective memory layout
+ suggest-new: true
+ govet:
+ # Report shadowed variables
+ check-shadowing: true
+ misspell:
+ # Correct spellings using locale preferences for US
+ locale: US
+ goimports:
+ # Put imports beginning with prefix after 3rd-party packages
+ local-prefixes: gitswarm.f5net.com/indigo,gitlab.com/f5
+ errcheck:
+ ignore: ^Close.*,os:^Setenv.*,fmt:.*,io/ioutil:^Read.*,github.com/spf13/viper:.*,github.com/pkg/errors:^Wrap.*
+
+ lll:
+ line-length: 140
+
+issues:
+ # Exclude configuration
+ exclude-rules:
+ # Exclude gochecknoinits and gosec from running on tests files
+ - path: _test\.go
+ linters:
+ - gochecknoinits
+ - gosec
+ - path: test/*
+ linters:
+ - gochecknoinits
+ - gosec
+ # Exclude lll issues for long lines with go:generate
+ - linters:
+ - lll
+ source: "^//go:generate "
+ - path: _desiredstatehistory(|_test)\.go
+ linters:
+ - dupl
+
+ # Disable maximum issues count per one linter
+ max-issues-per-linter: 0
+
+ # Disable maximum count of issues with the same text
+ max-same-issues: 0
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/CREDITS b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/CREDITS
new file mode 100644
index 000000000..bbc7632f8
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/CREDITS
@@ -0,0 +1 @@
+This library was originally forked from https://github.com/aluttik/go-crossplane. Credit goes to aluttik (https://github.com/aluttik) and we are grateful for his effort.
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/LICENSE b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/LICENSE
new file mode 100644
index 000000000..98f72179d
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "{}"
+replaced with your own identifying information. (Don't include
+the brackets!) The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+Copyright 2016 Nginx, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/Makefile b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/Makefile
new file mode 100644
index 000000000..c10774fbb
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/Makefile
@@ -0,0 +1,100 @@
+PACKAGE = $(notdir $(patsubst %/,%,$(dir $(realpath $(lastword $(MAKEFILE_LIST))))))
+OUT_DIR ?= build
+VENDOR_DIR ?= vendor
+RESULTS_DIR ?= results
+DOCKER_REGISTRY ?= local
+DOCKER_TAG ?= latest
+LINT_BIN := ./bin/golangci-lint
+
+SHELL=/bin/bash
+.SHELLFLAGS=-c -eo pipefail
+
+#######################################
+## Local set up.
+#######################################
+
+.PHONY: init deps deps-upgrade fmt test lint lint-shell gen
+
+init:
+ git config core.hooksPath .githooks
+ go get golang.org/x/tools/cmd/goimports@v0.1.10
+ go get github.com/maxbrunsfeld/counterfeiter/v6@latest
+ go get github.com/jstemmer/go-junit-report@latest
+ go install golang.org/x/tools/cmd/goimports
+ go install github.com/maxbrunsfeld/counterfeiter/v6
+ go install github.com/jstemmer/go-junit-report
+
+deps:
+ go mod download
+ go mod tidy
+ go mod verify
+ go mod vendor
+
+deps-upgrade:
+ GOFLAGS="" go get -u ./...
+ $(MAKE) deps
+
+#######################################
+## Tests, codegen, lint and format.
+#######################################
+fmt: $(info Running goimports...)
+ @goimports -w -e $$(find . -type f -name '*.go' -not -path "./vendor/*")
+
+test: $(info Running unit tests...)
+ mkdir -p $(RESULTS_DIR)
+ CGO_ENABLED=1 go test -race -v -cover ./... -coverprofile=$(RESULTS_DIR)/$(PACKAGE)-coverage.out 2>&1 | tee >(go-junit-report > $(RESULTS_DIR)/report.xml)
+ @echo "Total code coverage:"
+ @go tool cover -func=$(RESULTS_DIR)/$(PACKAGE)-coverage.out | grep 'total:' | tee $(RESULTS_DIR)/anybadge.out
+ @go tool cover -html=$(RESULTS_DIR)/$(PACKAGE)-coverage.out -o $(RESULTS_DIR)/coverage.html
+
+test-only-failed: $(info Running unit tests (showing only failed ones with context)...)
+ go test -v -race ./... | grep --color -B 45 -A 5 -E '^FAIL.+'
+
+$(LINT_BIN):
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
+
+lint: $(LINT_BIN)
+ $(LINT_BIN) run
+
+lint-shell:
+ shellcheck -x $$(find . -name "*.sh" -type f -not -path "./vendor/*")
+
+gen:
+ go generate -x ./...
+ $(MAKE) fmt
+
+#######################################
+## Build artifacts for deployment.
+#######################################
+
+.PHONY: build-out-dir build build-linux images dev-k8s clean
+
+build-out-dir:
+ @mkdir -p $(OUT_DIR)
+
+# Builds executable
+build: build-out-dir; $(info Building executable...) @
+ CGO_ENABLED=0 go build \
+ -v -tags 'release osusergo' \
+ -ldflags '-s -w -extldflags "-fno-PIC -static"' \
+ -o $(OUT_DIR)/$(PACKAGE) main.go
+
+build-darwin: build-out-dir; $(info Building executable...) @
+ CGO_ENABLED=1 go build \
+ -v -tags 'release osusergo' \
+ -ldflags '-s -w -extldflags "-fno-PIC"' \
+ -o $(OUT_DIR)/$(PACKAGE) main.go
+
+build-linux: export GOOS=linux
+build-linux: export GOARCH=amd64
+build-linux: build
+
+# Removes all build artifacts.
+clean: ; $(info Cleaning...) @
+ rm -rf $(OUT_DIR)/
+
+# Removes all files that could be downloaded/generated
+clean-force: clean; $(info Cleaning everything...) @
+ rm -rf $(VENDOR_DIR)/
+ rm -rf bin/
+ rm -f go.sum
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/README.md b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/README.md
new file mode 100644
index 000000000..0dbb1881a
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/README.md
@@ -0,0 +1,74 @@
+# go-crossplane
+A Go port of the NGINX config/JSON converter [crossplane](https://github.com/nginxinc/crossplane).
+
+## Parse
+This is an example that takes a path to an NGINX config file, converts it to JSON, and prints the result to stdout.
+```go
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/nginxinc/nginx-go-crossplane"
+)
+
+func main() {
+ path := os.Args[1]
+
+ payload, err := crossplane.Parse(path, &crossplane.ParseOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ b, err := json.Marshal(payload)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(string(b))
+}
+```
+
+## Build
+This is an example that takes a path to a JSON file, converts it to an NGINX config, and prints the result to stdout.
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/nginxinc/nginx-go-crossplane"
+)
+
+func main() {
+ path := os.Args[1]
+
+ file, err := os.Open(path)
+ if err != nil {
+ panic(err)
+ }
+
+ content, err := ioutil.ReadAll(file)
+ if err != nil {
+ panic(err)
+ }
+
+ var payload crossplane.Payload
+ if err = json.Unmarshal(content, &payload); err != nil {
+ panic(err)
+ }
+
+ var buf bytes.Buffer
+ if err = crossplane.Build(&buf, payload.Config[0], &crossplane.BuildOptions{}); err != nil {
+ panic(err)
+ }
+
+ fmt.Println(buf.String())
+}
+```
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/analyze.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/analyze.go
new file mode 100644
index 000000000..17550a12b
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/analyze.go
@@ -0,0 +1,2333 @@
+package crossplane
+
+import (
+ "fmt"
+)
+
+// bit masks for different directive argument styles.
+const (
+ ngxConfNoArgs = 0x00000001 // 0 args
+ ngxConfTake1 = 0x00000002 // 1 args
+ ngxConfTake2 = 0x00000004 // 2 args
+ ngxConfTake3 = 0x00000008 // 3 args
+ ngxConfTake4 = 0x00000010 // 4 args
+ ngxConfTake5 = 0x00000020 // 5 args
+ ngxConfTake6 = 0x00000040 // 6 args
+ // ngxConfTake7 = 0x00000080 // 7 args (currently unused).
+ ngxConfBlock = 0x00000100 // followed by block
+ ngxConfExpr = 0x00000200 // directive followed by expression in parentheses `()`
+ ngxConfFlag = 0x00000400 // 'on' or 'off'
+ ngxConfAny = 0x00000800 // >=0 args
+ ngxConf1More = 0x00001000 // >=1 args
+ ngxConf2More = 0x00002000 // >=2 args
+
+ // some helpful argument style aliases.
+ ngxConfTake12 = ngxConfTake1 | ngxConfTake2
+ ngxConfTake13 = ngxConfTake1 | ngxConfTake3
+ ngxConfTake23 = ngxConfTake2 | ngxConfTake3
+ ngxConfTake34 = ngxConfTake3 | ngxConfTake4
+ ngxConfTake123 = ngxConfTake12 | ngxConfTake3
+ ngxConfTake1234 = ngxConfTake123 | ngxConfTake4
+
+ // bit masks for different directive locations.
+ ngxDirectConf = 0x00010000 // main file (not used)
+ ngxMainConf = 0x00040000 // main context
+ ngxEventConf = 0x00080000 // events
+ ngxMailMainConf = 0x00100000 // mail
+ ngxMailSrvConf = 0x00200000 // mail > server
+ ngxStreamMainConf = 0x00400000 // stream
+ ngxStreamSrvConf = 0x00800000 // stream > server
+ ngxStreamUpsConf = 0x01000000 // stream > upstream
+ ngxHTTPMainConf = 0x02000000 // http
+ ngxHTTPSrvConf = 0x04000000 // http > server
+ ngxHTTPLocConf = 0x08000000 // http > location
+ ngxHTTPUpsConf = 0x10000000 // http > upstream
+ ngxHTTPSifConf = 0x20000000 // http > server > if
+ ngxHTTPLifConf = 0x40000000 // http > location > if
+ ngxHTTPLmtConf = 0x80000000 // http > location > limit_except
+)
+
+// helpful directive location alias describing "any" context
+// doesn't include ngxHTTPSifConf, ngxHTTPLifConf, or ngxHTTPLmtConf.
+const ngxAnyConf = ngxMainConf | ngxEventConf | ngxMailMainConf | ngxMailSrvConf |
+ ngxStreamMainConf | ngxStreamSrvConf | ngxStreamUpsConf |
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPUpsConf
+
+// map for getting bitmasks from certain context tuples
+// nolint:gochecknoglobals
+var contexts = map[string]uint{
+ blockCtx{}.key(): ngxMainConf,
+ blockCtx{"events"}.key(): ngxEventConf,
+ blockCtx{"mail"}.key(): ngxMailMainConf,
+ blockCtx{"mail", "server"}.key(): ngxMailSrvConf,
+ blockCtx{"stream"}.key(): ngxStreamMainConf,
+ blockCtx{"stream", "server"}.key(): ngxStreamSrvConf,
+ blockCtx{"stream", "upstream"}.key(): ngxStreamUpsConf,
+ blockCtx{"http"}.key(): ngxHTTPMainConf,
+ blockCtx{"http", "server"}.key(): ngxHTTPSrvConf,
+ blockCtx{"http", "location"}.key(): ngxHTTPLocConf,
+ blockCtx{"http", "upstream"}.key(): ngxHTTPUpsConf,
+ blockCtx{"http", "server", "if"}.key(): ngxHTTPSifConf,
+ blockCtx{"http", "location", "if"}.key(): ngxHTTPLifConf,
+ blockCtx{"http", "location", "limit_except"}.key(): ngxHTTPLmtConf,
+}
+
+func enterBlockCtx(stmt *Directive, ctx blockCtx) blockCtx {
+ // don't nest because ngxHTTPLocConf just means "location block in http"
+ if len(ctx) > 0 && ctx[0] == "http" && stmt.Directive == "location" {
+ return blockCtx{"http", "location"}
+ }
+ // no other block contexts can be nested like location so just append it
+ return append(ctx, stmt.Directive)
+}
+
+// nolint:gocyclo,funlen,gocognit
+func analyze(fname string, stmt *Directive, term string, ctx blockCtx, options *ParseOptions) error {
+ masks, knownDirective := directives[stmt.Directive]
+ currCtx, knownContext := contexts[ctx.key()]
+
+ // if strict and directive isn't recognized then throw error
+ if options.ErrorOnUnknownDirectives && !knownDirective {
+ return &ParseError{
+ What: fmt.Sprintf(`unknown directive "%s"`, stmt.Directive),
+ File: &fname,
+ Line: &stmt.Line,
+ }
+ }
+
+ // if we don't know where this directive is allowed and how
+ // many arguments it can take then don't bother analyzing it
+ if !knownContext || !knownDirective {
+ return nil
+ }
+
+ // if this directive can't be used in this context then throw an error
+ var ctxMasks []uint
+ if options.SkipDirectiveContextCheck {
+ ctxMasks = masks
+ } else {
+ for _, mask := range masks {
+ if (mask & currCtx) != 0 {
+ ctxMasks = append(ctxMasks, mask)
+ }
+ }
+ if len(ctxMasks) == 0 && !options.SkipDirectiveContextCheck {
+ return &ParseError{
+ What: fmt.Sprintf(`"%s" directive is not allowed here`, stmt.Directive),
+ File: &fname,
+ Line: &stmt.Line,
+ }
+ }
+ }
+
+ if options.SkipDirectiveArgsCheck {
+ return nil
+ }
+
+ // do this in reverse because we only throw errors at the end if no masks
+ // are valid, and typically the first bit mask is what the parser expects
+ var what string
+ for i := 0; i < len(ctxMasks); i++ {
+ mask := ctxMasks[i]
+
+ // if the directive is an expression type, there must be '(' 'expr' ')' args
+ if (mask&ngxConfExpr) > 0 && !validExpr(stmt) {
+ what = fmt.Sprintf(`directive "%s"'s is not enclosed in parentheses`, stmt.Directive)
+ continue
+ }
+
+ // if the directive isn't a block but should be according to the mask
+ if (mask&ngxConfBlock) != 0 && term != "{" {
+ what = fmt.Sprintf(`directive "%s" has no opening "{"`, stmt.Directive)
+ continue
+ }
+
+ // if the directive is a block but shouldn't be according to the mask
+ if (mask&ngxConfBlock) == 0 && term != ";" {
+ what = fmt.Sprintf(`directive "%s" is not terminated by ";"`, stmt.Directive)
+ continue
+ }
+
+ // use mask to check the directive's arguments
+ // nolint:gocritic
+ if ((mask>>len(stmt.Args)&1) != 0 && len(stmt.Args) <= 7) || // NOARGS to TAKE7
+ ((mask&ngxConfFlag) != 0 && len(stmt.Args) == 1 && validFlag(stmt.Args[0])) ||
+ ((mask & ngxConfAny) != 0) ||
+ ((mask&ngxConf1More) != 0 && len(stmt.Args) >= 1) ||
+ ((mask&ngxConf2More) != 0 && len(stmt.Args) >= 2) {
+ return nil
+ } else if (mask&ngxConfFlag) != 0 && len(stmt.Args) == 1 && !validFlag(stmt.Args[0]) {
+ what = fmt.Sprintf(`invalid value "%s" in "%s" directive, it must be "on" or "off"`, stmt.Args[0], stmt.Directive)
+ } else {
+ what = fmt.Sprintf(`invalid number of arguments in "%s" directive`, stmt.Directive)
+ }
+ }
+
+ return &ParseError{
+ What: what,
+ File: &fname,
+ Line: &stmt.Line,
+ }
+}
+
+// This dict maps directives to lists of bit masks that define their behavior.
+//
+// Each bit mask describes these behaviors:
+// - how many arguments the directive can take
+// - whether or not it is a block directive
+// - whether this is a flag (takes one argument that's either "on" or "off")
+// - which contexts it's allowed to be in
+//
+// Since some directives can have different behaviors in different contexts, we
+// use lists of bit masks, each describing a valid way to use the directive.
+//
+// Definitions for directives that're available in the open source version of
+nginx were taken directly from the source code. In fact, the variable
+// names for the bit masks defined above were taken from the nginx source code.
+//
+// Definitions for directives that're only available for nginx+ were inferred
+// from the documentation at http://nginx.org/en/docs/.
+//nolint:gochecknoglobals
+var directives = map[string][]uint{
+ "absolute_redirect": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "accept_mutex": {
+ ngxEventConf | ngxConfFlag,
+ },
+ "accept_mutex_delay": {
+ ngxEventConf | ngxConfTake1,
+ },
+ "access_log": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxHTTPLmtConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "add_after_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "add_before_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "add_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake23,
+ },
+ "add_trailer": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake23,
+ },
+ "addition_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "aio": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "aio_write": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "alias": {
+ ngxHTTPLocConf | ngxConfTake1,
+ },
+ "allow": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ancient_browser": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "ancient_browser_value": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "auth_basic": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "auth_basic_user_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "auth_delay": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "auth_http": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "auth_Httpheader": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake2,
+ },
+ "auth_Httppass_client_cert": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ },
+ "auth_Httptimeout": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "auth_request": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "auth_request_set": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "autoindex": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "autoindex_exact_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "autoindex_format": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "autoindex_localtime": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "break": {
+ ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfNoArgs,
+ },
+ "charset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "charset_map": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake2,
+ },
+ "charset_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "chunked_transfer_encoding": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "client_body_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "client_body_in_file_only": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "client_body_in_single_buffer": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "client_body_temp_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ },
+ "client_body_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "client_header_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "client_header_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "client_max_body_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "connection_pool_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "create_full_put_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "daemon": {
+ ngxMainConf | ngxDirectConf | ngxConfFlag,
+ },
+ "dav_access": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "dav_methods": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "debug_connection": {
+ ngxEventConf | ngxConfTake1,
+ },
+ "debug_points": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "default_type": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "deny": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "directio": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "directio_alignment": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "disable_symlinks": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "empty_gif": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "env": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "error_log": {
+ ngxMainConf | ngxConf1More,
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "error_page": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConf2More,
+ },
+ "etag": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "events": {
+ ngxMainConf | ngxConfBlock | ngxConfNoArgs,
+ },
+ "expires": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake12,
+ },
+ "fastcgi_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "fastcgi_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "fastcgi_busy_buffers_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_background_update": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_cache_bypass": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_cache_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_lock": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_cache_lock_age": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_lock_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_max_range_offset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_methods": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_cache_min_uses": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_path": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "fastcgi_cache_revalidate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_cache_use_stale": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_cache_valid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_catch_stderr": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_force_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_hide_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_ignore_client_abort": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_ignore_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_index": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_intercept_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_keep_conn": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_max_temp_file_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_next_upStreamtimeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_next_upStreamtries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_no_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "fastcgi_param": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake23,
+ },
+ "fastcgi_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "fastcgi_pass_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_pass_request_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_pass_request_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_request_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_send_lowat": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "fastcgi_split_path_info": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_store": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_store_access": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "fastcgi_temp_file_write_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_temp_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ },
+ "flv": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "geo": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake12,
+ ngxStreamMainConf | ngxConfBlock | ngxConfTake12,
+ },
+ "geoip_city": {
+ ngxHTTPMainConf | ngxConfTake12,
+ ngxStreamMainConf | ngxConfTake12,
+ },
+ "geoip_country": {
+ ngxHTTPMainConf | ngxConfTake12,
+ ngxStreamMainConf | ngxConfTake12,
+ },
+ "geoip_org": {
+ ngxHTTPMainConf | ngxConfTake12,
+ ngxStreamMainConf | ngxConfTake12,
+ },
+ "geoip_proxy": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "geoip_proxy_recursive": {
+ ngxHTTPMainConf | ngxConfFlag,
+ },
+ "google_perftools_profiles": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "grpc_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "grpc_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_hide_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ignore_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "grpc_intercept_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "grpc_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "grpc_next_upstream_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_next_upstream_tries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "grpc_pass_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_set_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "grpc_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "grpc_ssl_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_certificate_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_conf_command": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "grpc_ssl_crl": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_password_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_protocols": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "grpc_ssl_server_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "grpc_ssl_session_reuse": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "grpc_ssl_trusted_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "grpc_ssl_verify": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "grpc_ssl_verify_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "gunzip": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "gunzip_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "gzip": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "gzip_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "gzip_comp_level": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "gzip_disable": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "gzip_http_version": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "gzip_min_length": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "gzip_proxied": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "gzip_static": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "gzip_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "gzip_vary": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "hash": {
+ ngxHTTPUpsConf | ngxConfTake12,
+ ngxStreamUpsConf | ngxConfTake12,
+ },
+ "http": {
+ ngxMainConf | ngxConfBlock | ngxConfNoArgs,
+ },
+ "http2_body_preread_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_chunk_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "http2_idle_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_max_concurrent_pushes": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_max_concurrent_streams": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_max_field_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_max_header_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_max_requests": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "http2_push": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "http2_push_preload": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "http2_recv_buffer_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "http2_recv_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "if": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfBlock | ngxConfExpr | ngxConf1More,
+ },
+ "if_modified_since": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "ignore_invalid_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "image_filter": {
+ ngxHTTPLocConf | ngxConfTake123,
+ },
+ "image_filter_buffer": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "image_filter_interlace": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "image_filter_jpeg_quality": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "image_filter_sharpen": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "image_filter_transparency": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "image_filter_webp_quality": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "imap_auth": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "imap_capabilities": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "imap_client_buffer": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "include": {
+ ngxAnyConf | ngxConfTake1,
+ },
+ "index": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "internal": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "ip_hash": {
+ ngxHTTPUpsConf | ngxConfNoArgs,
+ },
+ "keepalive": {
+ ngxHTTPUpsConf | ngxConfTake1,
+ },
+ "keepalive_disable": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "keepalive_requests": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxHTTPUpsConf | ngxConfTake1,
+ },
+ "keepalive_time": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxHTTPUpsConf | ngxConfTake1,
+ },
+ "keepalive_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ ngxHTTPUpsConf | ngxConfTake1,
+ },
+ "large_client_header_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake2,
+ },
+ "least_conn": {
+ ngxHTTPUpsConf | ngxConfNoArgs,
+ ngxStreamUpsConf | ngxConfNoArgs,
+ },
+ "limit_conn": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake2,
+ },
+ "limit_conn_dry_run": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "limit_conn_log_level": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "limit_conn_status": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "limit_conn_zone": {
+ ngxHTTPMainConf | ngxConfTake2,
+ ngxStreamMainConf | ngxConfTake2,
+ },
+ "limit_except": {
+ ngxHTTPLocConf | ngxConfBlock | ngxConf1More,
+ },
+ "limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "limit_rate_after": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "limit_req": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "limit_req_dry_run": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "limit_req_log_level": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "limit_req_status": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "limit_req_zone": {
+ ngxHTTPMainConf | ngxConfTake34,
+ },
+ "lingering_close": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "lingering_time": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "lingering_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "listen": {
+ ngxHTTPSrvConf | ngxConf1More,
+ ngxMailSrvConf | ngxConf1More,
+ ngxStreamSrvConf | ngxConf1More,
+ },
+ "load_module": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "location": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfBlock | ngxConfTake12,
+ },
+ "lock_file": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "log_format": {
+ ngxHTTPMainConf | ngxConf2More,
+ ngxStreamMainConf | ngxConf2More,
+ },
+ "log_not_found": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "log_subrequest": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "mail": {
+ ngxMainConf | ngxConfBlock | ngxConfNoArgs,
+ },
+ "map": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake2,
+ ngxStreamMainConf | ngxConfBlock | ngxConfTake2,
+ },
+ "map_hash_bucket_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "map_hash_max_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "master_process": {
+ ngxMainConf | ngxDirectConf | ngxConfFlag,
+ },
+ "max_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "memcached_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_gzip_flag": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "memcached_next_upStreamtimeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_next_upStreamtries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "memcached_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "memcached_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "merge_slashes": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "min_delete_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mirror": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mirror_request_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "modern_browser": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "modern_browser_value": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mp4": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "mp4_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mp4_max_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "msie_padding": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "msie_refresh": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "multi_accept": {
+ ngxEventConf | ngxConfFlag,
+ },
+ "open_file_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "open_file_cache_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "open_file_cache_min_uses": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "open_file_cache_valid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "open_log_file_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1234,
+ },
+ "output_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "override_charset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "pcre_jit": {
+ ngxMainConf | ngxDirectConf | ngxConfFlag,
+ },
+ "perl": {
+ ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "perl_modules": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "perl_require": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "perl_set": {
+ ngxHTTPMainConf | ngxConfTake2,
+ },
+ "pid": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "pop3_auth": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "pop3_capabilities": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "port_in_redirect": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "postpone_output": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "preread_buffer_size": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "preread_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "protocol": {
+ ngxMailSrvConf | ngxConfTake1,
+ },
+ "proxy_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake12,
+ },
+ "proxy_buffer": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "proxy_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "proxy_busy_buffers_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_background_update": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_cache_bypass": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_cache_convert_head": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_cache_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_lock": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_cache_lock_age": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_lock_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_max_range_offset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_methods": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_cache_min_uses": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_cache_path": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "proxy_cache_revalidate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_cache_use_stale": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_cache_valid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_cookie_domain": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "proxy_cookie_flags": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_cookie_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "proxy_download_rate": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_force_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_half_close": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_headers_hash_bucket_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_headers_hash_max_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_hide_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_http_version": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_ignore_client_abort": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_ignore_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_intercept_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_max_temp_file_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_method": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_next_upstream_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_next_upstream_tries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_no_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "proxy_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxHTTPLmtConf | ngxConfTake1,
+ ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_pass_error_message": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ },
+ "proxy_pass_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_pass_request_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_pass_request_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_protocol": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_protocol_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_redirect": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "proxy_request_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "proxy_requests": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_responses": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_send_lowat": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_set_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_set_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "proxy_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_ssl": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_ssl_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_certificate_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_conf_command": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake2,
+ },
+ "proxy_ssl_crl": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_password_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_protocols": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "proxy_ssl_server_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_ssl_session_reuse": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_ssl_trusted_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_ssl_verify": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "proxy_ssl_verify_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_store": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_store_access": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "proxy_temp_file_write_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "proxy_temp_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ },
+ "proxy_timeout": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "proxy_upload_rate": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "random": {
+ ngxHTTPUpsConf | ngxConfNoArgs | ngxConfTake12,
+ ngxStreamUpsConf | ngxConfNoArgs | ngxConfTake12,
+ },
+ "random_index": {
+ ngxHTTPLocConf | ngxConfFlag,
+ },
+ "read_ahead": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "real_ip_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "real_ip_recursive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "recursive_error_pages": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "referer_hash_bucket_size": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "referer_hash_max_size": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "request_pool_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "reset_timedout_connection": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "resolver": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPUpsConf | ngxConf1More,
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamUpsConf | ngxStreamSrvConf | ngxConf1More,
+ ngxHTTPUpsConf | ngxConf1More,
+ },
+ "resolver_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPUpsConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamUpsConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "return": {
+ ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake12,
+ ngxStreamSrvConf | ngxConfTake1,
+ },
+ "rewrite": {
+ ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake23,
+ },
+ "rewrite_log": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "root": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "satisfy": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "scgi_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "scgi_busy_buffers_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_background_update": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_cache_bypass": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_cache_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_lock": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_cache_lock_age": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_lock_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_max_range_offset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_methods": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_cache_min_uses": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_cache_path": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "scgi_cache_revalidate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_cache_use_stale": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_cache_valid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_force_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_hide_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_ignore_client_abort": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_ignore_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_intercept_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_max_temp_file_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_next_upStreamtimeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_next_upStreamtries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_no_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "scgi_param": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake23,
+ },
+ "scgi_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "scgi_pass_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_pass_request_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_pass_request_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_request_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "scgi_store": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_store_access": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "scgi_temp_file_write_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "scgi_temp_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ },
+ "secure_link": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "secure_link_md5": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "secure_link_secret": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "send_lowat": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "sendfile": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "sendfile_max_chunk": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "server": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfNoArgs,
+ ngxHTTPUpsConf | ngxConf1More,
+ ngxMailMainConf | ngxConfBlock | ngxConfNoArgs,
+ ngxStreamMainConf | ngxConfBlock | ngxConfNoArgs,
+ ngxStreamUpsConf | ngxConf1More,
+ },
+ "server_name": {
+ ngxHTTPSrvConf | ngxConf1More,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "server_name_in_redirect": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "server_names_hash_bucket_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "server_names_hash_max_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "server_tokens": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "set": {
+ ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake2,
+ ngxStreamSrvConf | ngxConfTake2,
+ },
+ "set_real_ip_from": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "slice": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "smtp_auth": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "smtp_capabilities": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ },
+ "smtp_client_buffer": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "smtp_greeting_delay": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "source_charset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "spdy_chunk_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "spdy_headers_comp": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "split_clients": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake2,
+ ngxStreamMainConf | ngxConfBlock | ngxConfTake2,
+ },
+ "ssi": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "ssi_last_modified": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "ssi_min_file_chunk": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "ssi_silent_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "ssi_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "ssi_value_length": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "ssl": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ },
+ "ssl_alpn": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "ssl_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_certificate_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_client_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_conf_command": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake2,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake2,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake2,
+ },
+ "ssl_crl": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_dhparam": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_early_data": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "ssl_ecdh_curve": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_engine": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "ssl_handshake_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_ocsp": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_ocsp_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_ocsp_responder": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_password_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_prefer_server_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "ssl_preread": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "ssl_protocols": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConf1More,
+ ngxMailMainConf | ngxMailSrvConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "ssl_reject_handshake": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "ssl_session_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake12,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake12,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake12,
+ },
+ "ssl_session_ticket_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_session_tickets": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "ssl_session_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_stapling": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "ssl_stapling_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_stapling_responder": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ },
+ "ssl_stapling_verify": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "ssl_trusted_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_verify_client": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "ssl_verify_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfTake1,
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "starttls": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "stream": {
+ ngxMainConf | ngxConfBlock | ngxConfNoArgs,
+ },
+ "stub_status": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfNoArgs | ngxConfTake1,
+ },
+ "sub_filter": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "sub_filter_last_modified": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "sub_filter_once": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "sub_filter_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "subrequest_output_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "tcp_nodelay": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "tcp_nopush": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "thread_pool": {
+ ngxMainConf | ngxDirectConf | ngxConfTake23,
+ },
+ "timeout": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfTake1,
+ },
+ "timer_resolution": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "try_files": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf2More,
+ },
+ "types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfBlock | ngxConfNoArgs,
+ },
+ "types_hash_bucket_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "types_hash_max_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "underscores_in_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxConfFlag,
+ },
+ "uninitialized_variable_warn": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPSifConf | ngxHTTPLocConf | ngxHTTPLifConf | ngxConfFlag,
+ },
+ "upstream": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake1,
+ ngxStreamMainConf | ngxConfBlock | ngxConfTake1,
+ },
+ "upstream_conf": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "use": {
+ ngxEventConf | ngxConfTake1,
+ },
+ "user": {
+ ngxMainConf | ngxDirectConf | ngxConfTake12,
+ },
+ "userid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_domain": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_expires": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_flags": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_mark": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_p3p": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "userid_service": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_bind": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "uwsgi_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "uwsgi_busy_buffers_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_background_update": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_bypass": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_cache_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_lock": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_cache_lock_age": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_lock_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_max_range_offset": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_methods": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_cache_min_uses": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_cache_path": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "uwsgi_cache_revalidate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_cache_use_stale": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_cache_valid": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_connect_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_force_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_hide_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ignore_client_abort": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_ignore_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_intercept_errors": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_max_temp_file_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_modifier1": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_modifier2": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_next_upstream": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_next_upStreamtimeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_next_upStreamtries": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_no_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_param": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake23,
+ },
+ "uwsgi_pass": {
+ ngxHTTPLocConf | ngxHTTPLifConf | ngxConfTake1,
+ },
+ "uwsgi_pass_header": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_pass_request_body": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_pass_request_headers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_read_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_request_buffering": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_send_timeout": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_socket_keepalive": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_ssl_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_certificate_key": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_crl": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_password_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_protocols": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "uwsgi_ssl_server_name": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_ssl_session_reuse": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_ssl_trusted_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_ssl_verify": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "uwsgi_ssl_verify_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_store": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_store_access": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake123,
+ },
+ "uwsgi_temp_file_write_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "uwsgi_temp_path": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1234,
+ },
+ "valid_referers": {
+ ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "variables_hash_bucket_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "variables_hash_max_size": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "worker_aio_requests": {
+ ngxEventConf | ngxConfTake1,
+ },
+ "worker_connections": {
+ ngxEventConf | ngxConfTake1,
+ },
+ "worker_cpu_affinity": {
+ ngxMainConf | ngxDirectConf | ngxConf1More,
+ },
+ "worker_priority": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "worker_processes": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "worker_rlimit_core": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "worker_rlimit_nofile": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "worker_shutdown_timeout": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "working_directory": {
+ ngxMainConf | ngxDirectConf | ngxConfTake1,
+ },
+ "xclient": {
+ ngxMailMainConf | ngxMailSrvConf | ngxConfFlag,
+ },
+ "xml_entities": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "xslt_last_modified": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "xslt_param": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "xslt_string_param": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "xslt_stylesheet": {
+ ngxHTTPLocConf | ngxConf1More,
+ },
+ "xslt_types": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "zone": {
+ ngxHTTPUpsConf | ngxConfTake12,
+ ngxStreamUpsConf | ngxConfTake12,
+ },
+
+ // nginx+ directives [definitions inferred from docs]
+ "api": {
+ ngxHTTPLocConf | ngxConfNoArgs | ngxConfTake1,
+ },
+ "auth_jwt": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake12,
+ },
+ "auth_jwt_claim_set": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "auth_jwt_header_set": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "auth_jwt_key_cache": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "auth_jwt_key_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "auth_jwt_key_request": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "auth_jwt_leeway": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "auth_jwt_type": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "f4f": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "f4f_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "fastcgi_cache_purge": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "health_check": {
+ ngxHTTPLocConf | ngxConfAny,
+ ngxStreamSrvConf | ngxConfAny,
+ },
+ "health_check_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "hls": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "hls_buffers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "hls_forward_args": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "hls_fragment": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "hls_mp4_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "hls_mp4_max_buffer_size": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "js_access": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_body_filter": {
+ ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "js_content": {
+ ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "js_fetch_ciphers": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_fetch_protocols": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "js_fetch_trusted_certificate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_fetch_verify_depth": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_filter": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_header_filter": {
+ ngxHTTPLocConf | ngxHTTPLmtConf | ngxConfTake1,
+ },
+ "js_import": {
+ ngxHTTPMainConf | ngxConfTake13,
+ ngxStreamMainConf | ngxConfTake13,
+ },
+ "js_include": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "js_path": {
+ ngxHTTPMainConf | ngxConfTake1,
+ ngxStreamMainConf | ngxConfTake1,
+ },
+ "js_preread": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "js_set": {
+ ngxHTTPMainConf | ngxConfTake2,
+ ngxStreamMainConf | ngxConfTake2,
+ },
+ "js_var": {
+ ngxHTTPMainConf | ngxConfTake12,
+ ngxStreamMainConf | ngxConfTake12,
+ },
+ "keyval": {
+ ngxHTTPMainConf | ngxConfTake3,
+ ngxStreamMainConf | ngxConfTake3,
+ },
+ "keyval_zone": {
+ ngxHTTPMainConf | ngxConf1More,
+ ngxStreamMainConf | ngxConf1More,
+ },
+ "least_time": {
+ ngxHTTPUpsConf | ngxConfTake12,
+ ngxStreamUpsConf | ngxConfTake12,
+ },
+ "limit_zone": {
+ ngxHTTPMainConf | ngxConfTake3,
+ },
+ "match": {
+ ngxHTTPMainConf | ngxConfBlock | ngxConfTake1,
+ ngxStreamMainConf | ngxConfBlock | ngxConfTake1,
+ },
+ "memcached_force_ranges": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "mp4_limit_rate": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mp4_limit_rate_after": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "mp4_start_key_frame": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "ntlm": {
+ ngxHTTPUpsConf | ngxConfNoArgs,
+ },
+ "proxy_cache_purge": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "queue": {
+ ngxHTTPUpsConf | ngxConfTake12,
+ },
+ "scgi_cache_purge": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "session_log": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "session_log_format": {
+ ngxHTTPMainConf | ngxConf2More,
+ },
+ "session_log_zone": {
+ ngxHTTPMainConf | ngxConfTake23 | ngxConfTake4 | ngxConfTake5 | ngxConfTake6,
+ },
+ "state": {
+ ngxHTTPUpsConf | ngxConfTake1,
+ ngxStreamUpsConf | ngxConfTake1,
+ },
+ "status": {
+ ngxHTTPLocConf | ngxConfNoArgs,
+ },
+ "status_format": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake12,
+ },
+ "status_zone": {
+ ngxHTTPSrvConf | ngxConfTake1,
+ ngxStreamSrvConf | ngxConfTake1,
+ ngxHTTPLocConf | ngxConfTake1,
+ ngxHTTPLifConf | ngxConfTake1,
+ },
+ "sticky": {
+ ngxHTTPUpsConf | ngxConf1More,
+ },
+ "sticky_cookie_insert": {
+ ngxHTTPUpsConf | ngxConfTake1234,
+ },
	// NOTE(review): "upStreamconf" looks like a mangled "upstream_conf";
	// the correctly named entry already exists earlier in this map, so
	// renaming here would create a duplicate map key (a compile error).
	// Confirm against the upstream crossplane source before fixing.
	"upStreamconf": {
		ngxHTTPLocConf | ngxConfNoArgs,
	},
+ "uwsgi_cache_purge": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConf1More,
+ },
+ "zone_sync": {
+ ngxStreamSrvConf | ngxConfNoArgs,
+ },
+ "zone_sync_buffers": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake2,
+ },
+ "zone_sync_connect_retry_interval": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_connect_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_interval": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_recv_buffer_size": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_server": {
+ ngxStreamSrvConf | ngxConfTake12,
+ },
+ "zone_sync_ssl": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "zone_sync_ssl_certificate": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_certificate_key": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_ciphers": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_crl": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_name": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_password_file": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_protocols": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConf1More,
+ },
+ "zone_sync_ssl_server_name": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "zone_sync_ssl_trusted_certificate": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_ssl_verify": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfFlag,
+ },
+ "zone_sync_ssl_verify_depth": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+ "zone_sync_timeout": {
+ ngxStreamMainConf | ngxStreamSrvConf | ngxConfTake1,
+ },
+
+ // nginx app protect specific and global directives
+ // [https://docs.nginx.com/nginx-app-protect/configuration-guide/configuration/#directives]
+ "app_protect_compressed_requests_action": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "app_protect_cookie_seed": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "app_protect_cpu_thresholds": {
+ ngxHTTPMainConf | ngxConfTake2,
+ },
+ "app_protect_enable": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "app_protect_failure_mode_action": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "app_protect_physical_memory_util_thresholds": {
+ ngxHTTPMainConf | ngxConfTake2,
+ },
+ "app_protect_policy_file": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake1,
+ },
+ "app_protect_reconnect_period_seconds": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "app_protect_request_buffer_overflow_action": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+ "app_protect_security_log_enable": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfFlag,
+ },
+ "app_protect_security_log": {
+ ngxHTTPMainConf | ngxHTTPSrvConf | ngxHTTPLocConf | ngxConfTake2,
+ },
+ "app_protect_user_defined_signatures": {
+ ngxHTTPMainConf | ngxConfTake1,
+ },
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/buffer.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/buffer.go
new file mode 100644
index 000000000..1ec37d19b
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/buffer.go
@@ -0,0 +1,51 @@
+package crossplane
+
+import (
+ "io"
+ "strings"
+)
+
// Creator abstracts file creation (to write configs to something other than files).
type Creator interface {
	// Create opens the named target for writing and returns it.
	Create(string) (io.WriteCloser, error)
	// Reset returns the Creator to its initial, empty state.
	Reset()
}
+
// FileString is a string representation of a file: writes accumulate
// in an in-memory buffer instead of going to disk.
type FileString struct {
	Name string          // file name (path) this buffer stands in for
	w    strings.Builder // accumulated file contents
}
+
// Write makes this an io.Writer; bytes are appended to the in-memory buffer.
func (fs *FileString) Write(b []byte) (int, error) {
	return fs.w.Write(b)
}
+
+// Close makes this an io.Closer.
+func (fs *FileString) Close() error {
+ fs.w.WriteByte('\n')
+ return nil
+}
+
// String makes this a Stringer, returning the accumulated file contents.
func (fs *FileString) String() string {
	return fs.w.String()
}
+
// StringsCreator is an option for rendering config files to string(s)
// in memory rather than writing them to disk.
type StringsCreator struct {
	Files []*FileString // one entry per Create call, in creation order
}
+
+// Create makes this a Creator.
+func (sc *StringsCreator) Create(file string) (io.WriteCloser, error) {
+ wc := &FileString{Name: file}
+ sc.Files = append(sc.Files, wc)
+ return wc, nil
+}
+
+// Reset returns the Creator to its initial state.
+func (sc *StringsCreator) Reset() {
+ sc.Files = []*FileString{}
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/build.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/build.go
new file mode 100644
index 000000000..23167840a
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/build.go
@@ -0,0 +1,284 @@
+package crossplane
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
// BuildOptions control how an NGINX config is rendered by Build/BuildFiles.
type BuildOptions struct {
	Indent int  // characters per nesting level; 0 means the default of 4
	Tabs   bool // indent with tabs instead of spaces
	Header bool // prepend the generated-by-crossplane header comment
}

// MaxIndent bounds the pre-built margin strings below; margins deeper
// than this are constructed on demand by margin().
const MaxIndent = 100

// nolint:gochecknoglobals
var (
	marginSpaces = strings.Repeat(" ", MaxIndent)
	marginTabs   = strings.Repeat("\t", MaxIndent)
)

// header is written at the top of each file when BuildOptions.Header is set.
const header = `# This config was built from JSON using NGINX crossplane.
# If you encounter any bugs please report them here:
# https://github.com/nginxinc/crossplane/issues

`
+
+// BuildFiles builds all of the config files in a crossplane.Payload and
+// writes them to disk.
+func BuildFiles(payload Payload, dir string, options *BuildOptions) error {
+ if dir == "" {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ dir = cwd
+ }
+
+ for _, config := range payload.Config {
+ path := config.File
+ if !filepath.IsAbs(path) {
+ path = filepath.Join(dir, path)
+ }
+
+ // make directories that need to be made for the config to be built
+ dirpath := filepath.Dir(path)
+ if err := os.MkdirAll(dirpath, os.ModeDir|os.ModePerm); err != nil {
+ return err
+ }
+
+ // build then create the nginx config file using the json payload
+ var buf bytes.Buffer
+ if err := Build(&buf, config, options); err != nil {
+ return err
+ }
+
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+
+ output := append(bytes.TrimRightFunc(buf.Bytes(), unicode.IsSpace), '\n')
+ if _, err := f.Write(output); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Build creates an NGINX config from a crossplane.Config.
+func Build(w io.Writer, config Config, options *BuildOptions) error {
+ if options.Indent == 0 {
+ options.Indent = 4
+ }
+
+ if options.Header {
+ _, err := w.Write([]byte(header))
+ if err != nil {
+ return err
+ }
+ }
+
+ body := strings.Builder{}
+ buildBlock(&body, nil, config.Parsed, 0, 0, options)
+
+ bodyStr := body.String()
+ if len(bodyStr) > 0 && bodyStr[len(bodyStr)-1] == '\n' {
+ bodyStr = bodyStr[:len(bodyStr)-1]
+ }
+
+ _, err := w.Write([]byte(bodyStr))
+ return err
+}
+
// buildBlock renders each directive in block into sb at the given nesting
// depth. parent is nil at the top level; lastLine is the source line of the
// previously rendered statement and is used to keep same-line comments on
// the line they originally shared with that statement.
//nolint:gocognit
func buildBlock(sb io.StringWriter, parent *Directive, block Directives, depth int, lastLine int, options *BuildOptions) {
	for i, stmt := range block {
		// if this statement is a comment on the same line as the previous
		// statement, do not emit an EOL first — keep the comment inline
		if stmt.Line == lastLine && stmt.IsComment() {
			_, _ = sb.WriteString(" #")
			_, _ = sb.WriteString(*stmt.Comment)
			// sb.WriteString("\n")
			continue
		}

		// every statement except the very first top-level one starts on a new line
		if i != 0 || parent != nil {
			_, _ = sb.WriteString("\n")
		}

		_, _ = sb.WriteString(margin(options, depth))

		if stmt.IsComment() {
			_, _ = sb.WriteString("#")
			_, _ = sb.WriteString(*stmt.Comment)
		} else {
			directive := Enquote(stmt.Directive)
			_, _ = sb.WriteString(directive)

			// special handling for if statements: args go inside parentheses
			if directive == "if" {
				_, _ = sb.WriteString(" (")
				for i, arg := range stmt.Args {
					if i > 0 {
						_, _ = sb.WriteString(" ")
					}
					_, _ = sb.WriteString(Enquote(arg))
				}
				_, _ = sb.WriteString(")")
			} else {
				for _, arg := range stmt.Args {
					_, _ = sb.WriteString(" ")
					_, _ = sb.WriteString(Enquote(arg))
				}
			}

			if !stmt.IsBlock() {
				_, _ = sb.WriteString(";")
			} else {
				// recurse to render the nested block, then close the brace
				// at this statement's own indentation level
				_, _ = sb.WriteString(" {")
				stmt := stmt
				buildBlock(sb, stmt, stmt.Block, depth+1, stmt.Line, options)
				_, _ = sb.WriteString("\n")
				_, _ = sb.WriteString(margin(options, depth))
				_, _ = sb.WriteString("}")
			}
		}
		lastLine = stmt.Line
	}
}
+func margin(options *BuildOptions, depth int) string {
+ indent := depth * options.Indent
+ if indent < MaxIndent {
+ if options.Tabs {
+ return marginTabs[:depth]
+ }
+ return marginSpaces[:indent]
+ }
+
+ if options.Tabs {
+ return strings.Repeat("\t", depth)
+ }
+ return strings.Repeat(" ", options.Indent*depth)
+}
+
+func Enquote(arg string) string {
+ if !needsQuote(arg) {
+ return arg
+ }
+ return strings.ReplaceAll(repr(arg), `\\`, `\`)
+}
+
// needsQuote reports whether s must be quoted to survive a round trip
// through the NGINX config lexer: empty strings, whitespace, the special
// characters {};"' and malformed ${...} variable-expansion syntax all
// force quoting.
// nolint:gocyclo,gocognit
func needsQuote(s string) bool {
	if s == "" {
		return true
	}

	// lexer should throw an error when variable expansion syntax
	// is messed up, but just wrap it in quotes for now I guess
	var char rune
	chars := escape(s)

	if len(chars) == 0 {
		return true
	}

	// get first rune
	char, off := utf8.DecodeRune([]byte(chars))

	// arguments can't start with variable expansion syntax
	if unicode.IsSpace(char) || strings.ContainsRune("{};\"'", char) || strings.HasPrefix(chars, "${") {
		return true
	}

	chars = chars[off:]

	expanding := false // true while inside a ${...} expansion
	var prev rune = 0
	for _, c := range chars {
		char = c

		// a backslash escapes the following rune, so it cannot force quoting
		if prev == '\\' {
			prev = 0
			continue
		}
		if unicode.IsSpace(char) || strings.ContainsRune("{;\"'", char) {
			return true
		}

		// "${" while already expanding, or "}" while not, is malformed
		if (expanding && (prev == '$' && char == '{')) || (!expanding && char == '}') {
			return true
		}

		// toggle on a well-formed "${" opener or its matching "}"
		if (expanding && char == '}') || (!expanding && (prev == '$' && char == '{')) {
			expanding = !expanding
		}

		prev = char
	}

	// unterminated expansion, or a dangling escape/dollar at the end
	return expanding || char == '\\' || char == '$'
}
+
// escape normalizes backslash and dollar sequences in s for needsQuote's
// rune-by-rune scan: "\x" pairs and "${" openers are emitted intact, while
// a lone '\' or '$' is deferred until the following rune (or the end of
// the string) is known. Strings with none of {}$;\ are returned unchanged.
func escape(s string) string {
	if !strings.ContainsAny(s, "{}$;\\") {
		return s
	}

	sb := strings.Builder{}
	var pc, cc rune // previous and current rune

	for _, r := range s {
		cc = r
		// flush a deferred '\' together with the rune it escapes,
		// or a deferred '$' together with its '{' opener
		if pc == '\\' || (pc == '$' && cc == '{') {
			sb.WriteRune(pc)
			sb.WriteRune(cc)
			pc = 0
			continue
		}

		// a deferred '$' not followed by '{' is emitted on its own
		if pc == '$' {
			sb.WriteRune(pc)
		}
		// '\' and '$' are deferred until the next iteration
		if cc != '\\' && cc != '$' {
			sb.WriteRune(cc)
		}
		pc = cc
	}

	// flush a trailing deferred '\' or '$'
	if cc == '\\' || cc == '$' {
		sb.WriteRune(cc)
	}

	return sb.String()
}
+
+// BuildInto builds all of the config files in a crossplane.Payload and
+// writes them to the Creator.
+func BuildInto(payload *Payload, into Creator, options *BuildOptions) error {
+ for _, config := range payload.Config {
+ wc, err := into.Create(config.File)
+ if err != nil {
+ return err
+ }
+ if err := Build(wc, config, options); err != nil {
+ return err
+ }
+
+ if err := wc.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/errors.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/errors.go
new file mode 100644
index 000000000..3f0caa4cc
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/errors.go
@@ -0,0 +1,32 @@
+package crossplane
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
// ParseError describes an error found while parsing an NGINX config,
// including (when known) the file and line it occurred on.
type ParseError struct {
	What        string  // description of what went wrong
	File        *string // file the error occurred in; nil when unknown
	Line        *int    // line the error occurred on; nil when unknown
	originalErr error   // wrapped cause, surfaced via Unwrap
}
+
+func (e *ParseError) Error() string {
+ file := "(nofile)"
+ if e.File != nil {
+ file = *e.File
+ }
+ if e.Line != nil {
+ return fmt.Sprintf("%s in %s:%d", e.What, file, *e.Line)
+ }
+ return fmt.Sprintf("%s in %s", e.What, file)
+}
+
// MarshalJSON encodes the error as its formatted message string.
func (e *ParseError) MarshalJSON() ([]byte, error) {
	return json.Marshal(e.Error())
}
+
// Unwrap returns the underlying cause, supporting errors.Is / errors.As.
func (e *ParseError) Unwrap() error {
	return e.originalErr
}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/lex.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/lex.go
new file mode 100644
index 000000000..3af46ba67
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/lex.go
@@ -0,0 +1,222 @@
+package crossplane
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
// NgxToken is a single token produced by the config lexer.
type NgxToken struct {
	Value    string // token text (surrounding quotes are not included)
	Line     int    // line on which the token started
	IsQuoted bool   // true when the token came from a quoted string
	Error    error  // non-nil when lexing failed at this token
}
+
// state identifies which mode the lexer state machine is currently in.
type state int

const (
	skipSpace state = iota // between tokens, consuming whitespace
	inWord                 // inside a bare word token
	inComment              // inside a '#' comment (runs to end of line)
	inVar                  // inside "${...}" parameter-expansion syntax
	inQuote                // inside a single- or double-quoted string
)

// TokenChanCap is the default capacity of the lexer's token channel.
const TokenChanCap = 2048
+
// nolint:gochecknoglobals
var lexerFile = "lexer" // pseudo file name for use by parse errors

// nolint:gochecknoglobals
var tokChanCap = TokenChanCap // capacity of lexer token channel

// SetTokenChanCap overrides the capacity used for newly created lexer
// token channels.
// note: this is only used during tests, should not be changed
func SetTokenChanCap(size int) {
	tokChanCap = size
}
+func Lex(reader io.Reader) chan NgxToken {
+ tc := make(chan NgxToken, tokChanCap)
+ go tokenize(reader, tc)
+ return tc
+}
+
// tokenize reads runes from reader, emits NgxToken values on tokenCh, and
// closes the channel when the input ends or a lexing error is reported.
// It runs a small state machine (see the state constants) over one rune
// of lookahead, tracking brace depth so unbalanced blocks are detected.
// nolint:gocyclo,funlen,gocognit
func tokenize(reader io.Reader, tokenCh chan NgxToken) {
	token := strings.Builder{} // accumulates the current token's text
	tokenLine := 1             // line currently being scanned
	tokenStartLine := 1        // line on which the current token began

	lexState := skipSpace
	newToken := false
	dupSpecialChar := false // previous token was one of { } ;
	readNext := true        // false re-evaluates the current lookahead rune
	esc := false            // previous rune was an unconsumed backslash
	depth := 0              // brace nesting depth
	var la, quote string

	scanner := bufio.NewScanner(reader)
	scanner.Split(bufio.ScanRunes)

	// emit sends the accumulated token and resets for the next one.
	emit := func(line int, quoted bool, err error) {
		tokenCh <- NgxToken{Value: token.String(), Line: line, IsQuoted: quoted, Error: err}
		token.Reset()
		lexState = skipSpace
	}

	for {
		if readNext {
			if !scanner.Scan() {
				break // done
			}

			la = scanner.Text()
			if isEOL(la) {
				tokenLine++
			}
		} else {
			readNext = true
		}

		// skip CRs
		if la == "\r" || la == "\\\r" {
			continue
		}

		// fold a backslash and the rune it escapes into one lookahead unit
		if la == "\\" && !esc {
			esc = true
			continue
		}
		if esc {
			esc = false
			la = "\\" + la
		}

		switch lexState {
		case skipSpace:
			if !isSpace(la) {
				lexState = inWord
				newToken = true
				readNext = false // re-eval
				tokenStartLine = tokenLine
			}
			continue
		case inWord:
			if newToken {
				newToken = false
				if la == "#" {
					token.WriteString(la)
					lexState = inComment
					tokenStartLine = tokenLine
					continue
				}
			}

			if isSpace(la) {
				emit(tokenStartLine, false, nil)
				continue
			}

			// handle parameter expansion syntax (ex: "${var[@]}")
			if token.Len() > 0 && strings.HasSuffix(token.String(), "$") && la == "{" {
				token.WriteString(la)
				lexState = inVar
				dupSpecialChar = false
				continue
			}

			// if a quote is found, add the whole string to the token buffer
			if la == `"` || la == "'" {
				if token.Len() > 0 {
					// if a quote is inside a token, treat it like any other char
					token.WriteString(la)
				} else {
					// swallow quote and change state
					quote = la
					lexState = inQuote
					tokenStartLine = tokenLine
				}
				dupSpecialChar = false
				continue
			}

			// handle special characters that are treated like full tokens
			if la == "{" || la == "}" || la == ";" {
				// if token complete yield it and reset token buffer
				if token.Len() > 0 {
					emit(tokenStartLine, false, nil)
				}

				// only '}' can be repeated
				if dupSpecialChar && la != "}" {
					emit(tokenStartLine, false, &ParseError{
						File: &lexerFile,
						What: fmt.Sprintf(`unexpected "%s"`, la),
						Line: &tokenLine,
					})
					close(tokenCh)
					return
				}

				dupSpecialChar = true

				if la == "{" {
					depth++
				}
				if la == "}" {
					depth--
					// early exit if unbalanced braces
					if depth < 0 {
						emit(tokenStartLine, false, &ParseError{File: &lexerFile, What: `unexpected "}"`, Line: &tokenLine})
						close(tokenCh)
						return
					}
				}

				token.WriteString(la)
				// this character is a full token so emit it
				emit(tokenStartLine, false, nil)
				continue
			}

			dupSpecialChar = false
			token.WriteString(la)

		case inComment:
			if isEOL(la) {
				emit(tokenStartLine, false, nil)
				continue
			}
			token.WriteString(la)

		case inVar:
			// this is using the same logic as the existing lexer, but this
			// is wrong since it does not terminate on a token boundary
			if !strings.HasSuffix(token.String(), "}") && !isSpace(la) {
				token.WriteString(la)
				continue
			}
			token.WriteString(la)
			lexState = inWord

		case inQuote:
			if la == quote {
				emit(tokenStartLine, true, nil)
				continue
			}
			if la == "\\"+quote {
				la = quote
			}
			token.WriteString(la)
		}
	}

	// flush any token still buffered at EOF, then report unbalanced braces
	if token.Len() > 0 {
		emit(tokenStartLine, lexState == inQuote, nil)
	}
	if depth > 0 {
		emit(tokenStartLine, false, &ParseError{File: &lexerFile, What: `unexpected end of file, expecting "}"`, Line: &tokenLine})
	}

	close(tokenCh)
}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/parse.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/parse.go
new file mode 100644
index 000000000..4fa6ba3fe
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/parse.go
@@ -0,0 +1,454 @@
+package crossplane
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// nolint:gochecknoglobals
+var (
+ hasMagic = regexp.MustCompile(`[*?[]`)
+ osOpen = func(path string) (io.Reader, error) { return os.Open(path) }
+ ErrPrematureLexEnd = errors.New("premature end of file")
+)
+
+type blockCtx []string
+
+func (c blockCtx) key() string {
+ return strings.Join(c, ">")
+}
+
+type fileCtx struct {
+ path string
+ ctx blockCtx
+}
+
+type parser struct {
+ configDir string
+ options *ParseOptions
+ handleError func(*Config, error)
+ includes []fileCtx
+ included map[string]int
+ includeEdges map[string][]string
+ includeInDegree map[string]int
+}
+
+// ParseOptions determine the behavior of an NGINX config parse.
+type ParseOptions struct {
+ // An array of directives to skip over and not include in the payload.
+ IgnoreDirectives []string
+
+ // If an error is found while parsing, it will be passed to this callback
+ // function. The results of the callback function will be set in the
+ // PayloadError struct that's added to the Payload struct's Errors array.
+ ErrorCallback func(error) interface{}
+
+ // If specified, use this alternative to open config files
+ Open func(path string) (io.Reader, error)
+
+ // Glob will return a matching list of files if specified
+ Glob func(path string) ([]string, error)
+
+ // If true, parsing will stop immediately if an error is found.
+ StopParsingOnError bool
+
+ // If true, include directives are used to combine all of the Payload's
+ // Config structs into one.
+ CombineConfigs bool
+
+ // If true, only the config file with the given filename will be parsed
+ // and Parse will not parse files included files.
+ SingleFile bool
+
+ // If true, comments will be parsed and added to the resulting Payload.
+ ParseComments bool
+
+ // If true, add an error to the payload when encountering a directive that
+ // is unrecognized. The unrecognized directive will not be included in the
+ // resulting Payload.
+ ErrorOnUnknownDirectives bool
+
+ // If true, checks that directives are in valid contexts.
+ SkipDirectiveContextCheck bool
+
+ // If true, checks that directives have a valid number of arguments.
+ SkipDirectiveArgsCheck bool
+}
+
+// Parse parses an NGINX configuration file.
+//nolint:funlen
+func Parse(filename string, options *ParseOptions) (*Payload, error) {
+ payload := &Payload{
+ Status: "ok",
+ Errors: []PayloadError{},
+ Config: []Config{},
+ }
+ if options.Glob == nil {
+ options.Glob = filepath.Glob
+ }
+
+ handleError := func(config *Config, err error) {
+ var line *int
+ if e, ok := err.(*ParseError); ok {
+ line = e.Line
+ }
+ cerr := ConfigError{Line: line, Error: err}
+ perr := PayloadError{Line: line, Error: err, File: config.File}
+ if options.ErrorCallback != nil {
+ perr.Callback = options.ErrorCallback(err)
+ }
+
+ const failedSts = "failed"
+ config.Status = failedSts
+ config.Errors = append(config.Errors, cerr)
+
+ payload.Status = failedSts
+ payload.Errors = append(payload.Errors, perr)
+ }
+
+ // Start with the main nginx config file/context.
+ p := parser{
+ configDir: filepath.Dir(filename),
+ options: options,
+ handleError: handleError,
+ includes: []fileCtx{{path: filename, ctx: blockCtx{}}},
+ included: map[string]int{filename: 0},
+ // adjacency list where an edge exists between a file and the file it includes
+ includeEdges: map[string][]string{},
+ // number of times a file is included by another file
+ includeInDegree: map[string]int{filename: 0},
+ }
+
+ for len(p.includes) > 0 {
+ incl := p.includes[0]
+ p.includes = p.includes[1:]
+
+ file, err := p.openFile(incl.path)
+ if err != nil {
+ return nil, err
+ }
+
+ tokens := Lex(file)
+ config := Config{
+ File: incl.path,
+ Status: "ok",
+ Errors: []ConfigError{},
+ Parsed: Directives{},
+ }
+ parsed, err := p.parse(&config, tokens, incl.ctx, false)
+ if err != nil {
+ if options.StopParsingOnError {
+ return nil, err
+ }
+ handleError(&config, err)
+ } else {
+ config.Parsed = parsed
+ }
+
+ payload.Config = append(payload.Config, config)
+ }
+
+ if p.isAcyclic() {
+ return nil, errors.New("configs contain include cycle")
+ }
+
+ if options.CombineConfigs {
+ return payload.Combined()
+ }
+
+ return payload, nil
+}
+
+func (p *parser) openFile(path string) (io.Reader, error) {
+ open := osOpen
+ if p.options.Open != nil {
+ open = p.options.Open
+ }
+ return open(path)
+}
+
+// parse recursively parses directives from an nginx config context.
+// nolint:gocyclo,funlen,gocognit
+func (p *parser) parse(parsing *Config, tokens <-chan NgxToken, ctx blockCtx, consume bool) (parsed Directives, err error) {
+ var tokenOk bool
+ // parse recursively by pulling from a flat stream of tokens
+ for t := range tokens {
+ if t.Error != nil {
+ var perr *ParseError
+ if errors.As(t.Error, &perr) {
+ perr.File = &parsing.File
+ return nil, perr
+ }
+ return nil, &ParseError{
+ What: t.Error.Error(),
+ File: &parsing.File,
+ Line: &t.Line,
+ originalErr: t.Error,
+ }
+ }
+
+ var commentsInArgs []string
+
+ // we are parsing a block, so break if it's closing
+ if t.Value == "}" && !t.IsQuoted {
+ break
+ }
+
+ // if we are consuming, then just continue until end of context
+ if consume {
+ // if we find a block inside this context, consume it too
+ if t.Value == "{" && !t.IsQuoted {
+ _, _ = p.parse(parsing, tokens, nil, true)
+ }
+ continue
+ }
+
+ var fileName string
+ if p.options.CombineConfigs {
+ fileName = parsing.File
+ }
+
+ // the first token should always be an nginx directive
+ stmt := &Directive{
+ Directive: t.Value,
+ Line: t.Line,
+ Args: []string{},
+ File: fileName,
+ }
+
+ // if token is comment
+ if strings.HasPrefix(t.Value, "#") && !t.IsQuoted {
+ if p.options.ParseComments {
+ comment := t.Value[1:]
+ stmt.Directive = "#"
+ stmt.Comment = &comment
+ parsed = append(parsed, stmt)
+ }
+ continue
+ }
+
+ // parse arguments by reading tokens
+ t, tokenOk = <-tokens
+ if !tokenOk {
+ return nil, &ParseError{
+ What: ErrPrematureLexEnd.Error(),
+ File: &parsing.File,
+ Line: &stmt.Line,
+ originalErr: ErrPrematureLexEnd,
+ }
+ }
+ for t.IsQuoted || (t.Value != "{" && t.Value != ";" && t.Value != "}") {
+ if strings.HasPrefix(t.Value, "#") && !t.IsQuoted {
+ commentsInArgs = append(commentsInArgs, t.Value[1:])
+ } else {
+ stmt.Args = append(stmt.Args, t.Value)
+ }
+ t, tokenOk = <-tokens
+ if !tokenOk {
+ return nil, &ParseError{
+ What: ErrPrematureLexEnd.Error(),
+ File: &parsing.File,
+ Line: &stmt.Line,
+ originalErr: ErrPrematureLexEnd,
+ }
+ }
+ }
+
+ // if inside map or geo block - add contents to payload, but do not parse further
+ if len(ctx) > 0 && (ctx[len(ctx)-1] == "map" || ctx[len(ctx)-1] == "charset_map" || ctx[len(ctx)-1] == "geo") {
+ mapErr := analyzeMapContents(parsing.File, stmt, t.Value)
+ if mapErr != nil && p.options.StopParsingOnError {
+ return nil, mapErr
+ } else if mapErr != nil {
+ p.handleError(parsing, mapErr)
+ // consume invalid block
+ if t.Value == "{" && !t.IsQuoted {
+ _, _ = p.parse(parsing, tokens, nil, true)
+ }
+ continue
+ }
+ parsed = append(parsed, stmt)
+ continue
+ }
+
+ // consume the directive if it is ignored and move on
+ if contains(p.options.IgnoreDirectives, stmt.Directive) {
+ // if this directive was a block consume it too
+ if t.Value == "{" && !t.IsQuoted {
+ _, _ = p.parse(parsing, tokens, nil, true)
+ }
+ continue
+ }
+
+ // raise errors if this statement is invalid
+ err = analyze(parsing.File, stmt, t.Value, ctx, p.options)
+
+ if perr, ok := err.(*ParseError); ok && !p.options.StopParsingOnError {
+ p.handleError(parsing, perr)
+ // if it was a block but shouldn"t have been then consume
+ if strings.HasSuffix(perr.What, ` is not terminated by ";"`) {
+ if t.Value != "}" && !t.IsQuoted {
+ _, _ = p.parse(parsing, tokens, nil, true)
+ } else {
+ break
+ }
+ }
+ // keep on parsin'
+ continue
+ } else if err != nil {
+ return nil, err
+ }
+
+ // prepare arguments - strip parentheses
+ if stmt.Directive == "if" {
+ stmt = prepareIfArgs(stmt)
+ }
+
+ // add "includes" to the payload if this is an include statement
+ if !p.options.SingleFile && stmt.Directive == "include" {
+ if len(stmt.Args) == 0 {
+ return nil, &ParseError{
+ What: fmt.Sprintf(`invalid number of arguments in "%s" directive in %s:%d`,
+ stmt.Directive,
+ parsing.File,
+ stmt.Line,
+ ),
+ File: &parsing.File,
+ Line: &stmt.Line,
+ }
+ }
+
+ pattern := stmt.Args[0]
+ if !filepath.IsAbs(pattern) {
+ pattern = filepath.Join(p.configDir, pattern)
+ }
+
+ // get names of all included files
+ var fnames []string
+ if hasMagic.MatchString(pattern) {
+ fnames, err = p.options.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(fnames)
+ } else {
+ // if the file pattern was explicit, nginx will check
+ // that the included file can be opened and read
+ if f, err := p.openFile(pattern); err != nil {
+ perr := &ParseError{
+ What: err.Error(),
+ File: &parsing.File,
+ Line: &stmt.Line,
+ }
+ if !p.options.StopParsingOnError {
+ p.handleError(parsing, perr)
+ } else {
+ return nil, perr
+ }
+ } else {
+ if c, ok := f.(io.Closer); ok {
+ _ = c.Close()
+ }
+ fnames = []string{pattern}
+ }
+ }
+
+ for _, fname := range fnames {
+ // the included set keeps files from being parsed twice
+ // TODO: handle files included from multiple contexts
+ if _, ok := p.included[fname]; !ok {
+ p.included[fname] = len(p.included)
+ p.includes = append(p.includes, fileCtx{fname, ctx})
+ }
+ stmt.Includes = append(stmt.Includes, p.included[fname])
+ // add edge between the current file and it's included file and
+ // increase the included file's in degree
+ p.includeEdges[parsing.File] = append(p.includeEdges[parsing.File], fname)
+ p.includeInDegree[fname]++
+ }
+ }
+
+ // if this statement terminated with "{" then it is a block
+ if t.Value == "{" && !t.IsQuoted {
+ stmt.Block = make(Directives, 0)
+ inner := enterBlockCtx(stmt, ctx) // get context for block
+ blocks, err := p.parse(parsing, tokens, inner, false)
+ if err != nil {
+ return nil, err
+ }
+ stmt.Block = append(stmt.Block, blocks...)
+ }
+
+ parsed = append(parsed, stmt)
+
+ // add all comments found inside args after stmt is added
+ for _, comment := range commentsInArgs {
+ comment := comment
+ parsed = append(parsed, &Directive{
+ Directive: "#",
+ Line: stmt.Line,
+ Args: []string{},
+ File: fileName,
+ Comment: &comment,
+ })
+ }
+ }
+
+ return parsed, nil
+}
+
+// isAcyclic performs a topological sort to check if there are cycles created by configs' includes.
+// First, it adds any files that are not being referenced by another file to a queue (in degree of 0).
+// For every file in the queue, it will remove the reference it has towards its neighbors.
+// At the end, if the queue is empty but not all files were once in the queue,
+// then files still exist with references, and therefore, a cycle is present.
+func (p *parser) isAcyclic() bool {
+ // add to queue if file is not being referenced by any other file
+ var queue []string
+ for k, v := range p.includeInDegree {
+ if v == 0 {
+ queue = append(queue, k)
+ }
+ }
+ fileCount := 0
+ for len(queue) > 0 {
+ // dequeue
+ file := queue[0]
+ queue = queue[1:]
+ fileCount++
+
+ // decrease each neighbor's in degree
+ neighbors := p.includeEdges[file]
+ for _, f := range neighbors {
+ p.includeInDegree[f]--
+ if p.includeInDegree[f] == 0 {
+ queue = append(queue, f)
+ }
+ }
+ }
+ return fileCount != len(p.includeInDegree)
+}
+
+func analyzeMapContents(fname string, stmt *Directive, term string) error {
+ if term != ";" {
+ return &ParseError{
+ What: fmt.Sprintf(`unexpected "%s"`, term),
+ File: &fname,
+ Line: &stmt.Line,
+ }
+ }
+ if len(stmt.Args) != 1 && stmt.Directive != "ranges" {
+ return &ParseError{
+ What: "invalid number of parameters",
+ File: &fname,
+ Line: &stmt.Line,
+ }
+ }
+ return nil
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/types.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/types.go
new file mode 100644
index 000000000..8c01dcdba
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/types.go
@@ -0,0 +1,134 @@
+package crossplane
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Payload struct {
+ Status string `json:"status"`
+ Errors []PayloadError `json:"errors"`
+ Config []Config `json:"config"`
+}
+
+type PayloadError struct {
+ File string `json:"file"`
+ Line *int `json:"line"`
+ Error error `json:"error"`
+ Callback interface{} `json:"callback,omitempty"`
+}
+
+type Config struct {
+ File string `json:"file"`
+ Status string `json:"status"`
+ Errors []ConfigError `json:"errors"`
+ Parsed Directives `json:"parsed"`
+}
+
+type ConfigError struct {
+ Line *int `json:"line"`
+ Error error `json:"error"`
+}
+
+type Directive struct {
+ Directive string `json:"directive"`
+ Line int `json:"line"`
+ Args []string `json:"args"`
+ File string `json:"file,omitempty"`
+ Includes []int `json:"includes,omitempty"`
+ Block Directives `json:"block,omitempty"`
+ Comment *string `json:"comment,omitempty"`
+}
+type Directives []*Directive
+
+// IsBlock returns true if this is a block directive.
+func (d Directive) IsBlock() bool {
+ return d.Block != nil
+}
+
+// IsInclude returns true if this is an include directive.
+func (d Directive) IsInclude() bool {
+ return d.Directive == "include" && d.Includes != nil
+}
+
+// IsComment returns true iff the directive represents a comment.
+func (d Directive) IsComment() bool {
+ return d.Directive == "#" && d.Comment != nil
+}
+
+func equals(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, x := range a {
+ if x != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// strPtrEqual returns true if the contents of the provided string pointers are equal.
+func strPtrEqual(a, b *string) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return *a == *b
+}
+
+// Equal returns true if both blocks are functionally equivalent.
+func (d *Directive) Equal(a *Directive) bool {
+ if d == a {
+ // same ptr, or both nil
+ return true
+ }
+ if d == nil || a == nil {
+ return false
+ }
+ switch {
+ case a.Directive != d.Directive:
+ return false
+ case !equals(a.Args, d.Args):
+ return false
+ case len(a.Block) != len(d.Block):
+ return false
+ case len(a.Includes) != len(d.Includes):
+ return false
+ case !strPtrEqual(a.Comment, d.Comment):
+ return false
+ case a.Line != d.Line:
+ return false
+ case a.File != d.File:
+ return false
+ }
+ for i, inc := range a.Includes {
+ if inc != d.Includes[i] {
+ return false
+ }
+ }
+ for i, dir := range a.Block {
+ if !dir.Equal(d.Block[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// String makes this a Stringer, returning a string representation of the Directive. The string representation is a
+// peek at the content of the Directive; it does not represent a valid config rendering of the Directive in question.
+func (d *Directive) String() string {
+ if len(d.Block) == 0 {
+ return fmt.Sprintf("%s %s", d.Directive, strings.Join(d.Args, " "))
+ }
+ return fmt.Sprintf("%s %s {...}", d.Directive, strings.Join(d.Args, " "))
+}
+
+// Combined returns a new Payload that is the same except that the including
+// logic is performed on its configs. This means that the resulting Payload
+// will always have 0 or 1 configs in its Config field.
+func (p *Payload) Combined() (*Payload, error) {
+ return combineConfigs(p)
+}
diff --git a/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/util.go b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/util.go
new file mode 100644
index 000000000..679dddefc
--- /dev/null
+++ b/test/integration/vendor/github.com/nginxinc/nginx-go-crossplane/util.go
@@ -0,0 +1,164 @@
+package crossplane
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+type included struct {
+ directive *Directive
+ err error
+}
+
+func contains(xs []string, x string) bool {
+ for _, s := range xs {
+ if s == x {
+ return true
+ }
+ }
+ return false
+}
+
+func isSpace(s string) bool {
+ return len(strings.TrimSpace(s)) == 0
+}
+
+func isEOL(s string) bool {
+ return strings.HasSuffix(s, "\n")
+}
+
+func repr(s string) string {
+ q := fmt.Sprintf("%q", s)
+ for _, char := range s {
+ if char == '"' {
+ q = strings.ReplaceAll(q, `\"`, `"`)
+ q = strings.ReplaceAll(q, `'`, `\'`)
+ q = `'` + q[1:len(q)-1] + `'`
+ return q
+ }
+ }
+ return q
+}
+
+func validFlag(s string) bool {
+ l := strings.ToLower(s)
+ return l == "on" || l == "off"
+}
+
+// validExpr ensures an expression is enclosed in '(' and ')' and is not empty.
+func validExpr(d *Directive) bool {
+ l := len(d.Args)
+ b := 0
+ e := l - 1
+
+ return l > 0 &&
+ strings.HasPrefix(d.Args[b], "(") &&
+ strings.HasSuffix(d.Args[e], ")") &&
+ ((l == 1 && len(d.Args[b]) > 2) || // empty expression single arg '()'
+ (l == 2 && (len(d.Args[b]) > 1 || len(d.Args[e]) > 1)) || // empty expression two args '(', ')'
+ (l > 2))
+}
+
+// prepareIfArgs removes parentheses from an `if` directive's arguments.
+func prepareIfArgs(d *Directive) *Directive {
+ b := 0
+ e := len(d.Args) - 1
+ if len(d.Args) > 0 && strings.HasPrefix(d.Args[0], "(") && strings.HasSuffix(d.Args[e], ")") {
+ d.Args[0] = strings.TrimLeftFunc(strings.TrimPrefix(d.Args[0], "("), unicode.IsSpace)
+ d.Args[e] = strings.TrimRightFunc(strings.TrimSuffix(d.Args[e], ")"), unicode.IsSpace)
+ if len(d.Args[0]) == 0 {
+ b++
+ }
+ if len(d.Args[e]) == 0 {
+ e--
+ }
+ d.Args = d.Args[b : e+1]
+ }
+ return d
+}
+
+// combineConfigs combines config files into one by using include directives.
+func combineConfigs(old *Payload) (*Payload, error) {
+ if len(old.Config) < 1 {
+ return old, nil
+ }
+
+ status := old.Status
+ if status == "" {
+ status = "ok"
+ }
+
+ errors := old.Errors
+ if errors == nil {
+ errors = []PayloadError{}
+ }
+
+ combined := Config{
+ File: old.Config[0].File,
+ Status: "ok",
+ Errors: []ConfigError{},
+ Parsed: Directives{},
+ }
+
+ for _, config := range old.Config {
+ combined.Errors = append(combined.Errors, config.Errors...)
+ if config.Status == "failed" {
+ combined.Status = "failed"
+ }
+ }
+
+ for incl := range performIncludes(old, combined.File, old.Config[0].Parsed) {
+ if incl.err != nil {
+ return nil, incl.err
+ }
+ combined.Parsed = append(combined.Parsed, incl.directive)
+ }
+
+ return &Payload{
+ Status: status,
+ Errors: errors,
+ Config: []Config{combined},
+ }, nil
+}
+
+func performIncludes(old *Payload, fromfile string, block Directives) chan included {
+ c := make(chan included)
+ go func() {
+ defer close(c)
+ for _, d := range block {
+ dir := *d
+ if dir.IsBlock() {
+ nblock := Directives{}
+ for incl := range performIncludes(old, fromfile, dir.Block) {
+ if incl.err != nil {
+ c <- incl
+ return
+ }
+ nblock = append(nblock, incl.directive)
+ }
+ dir.Block = nblock
+ }
+ if !dir.IsInclude() {
+ c <- included{directive: &dir}
+ continue
+ }
+ for _, idx := range dir.Includes {
+ if idx >= len(old.Config) {
+ c <- included{
+ err: &ParseError{
+ What: fmt.Sprintf("include config with index: %d", idx),
+ File: &fromfile,
+ Line: &dir.Line,
+ },
+ }
+ return
+ }
+ for incl := range performIncludes(old, old.Config[idx].File, old.Config[idx].Parsed) {
+ c <- incl
+ }
+ }
+ }
+ }()
+ return c
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/test/integration/vendor/github.com/pelletier/go-toml/v2/.dockerignore
new file mode 100644
index 000000000..7b5883475
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/.dockerignore
@@ -0,0 +1,2 @@
+cmd/tomll/tomll
+cmd/tomljson/tomljson
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitattributes
new file mode 100644
index 000000000..34a0a21a3
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitattributes
@@ -0,0 +1,4 @@
+* text=auto
+
+benchmark/benchmark.toml text eol=lf
+testdata/** text eol=lf
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitignore b/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitignore
new file mode 100644
index 000000000..a69e2b0eb
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/.gitignore
@@ -0,0 +1,6 @@
+test_program/test_program_bin
+fuzz/
+cmd/tomll/tomll
+cmd/tomljson/tomljson
+cmd/tomltestgen/tomltestgen
+dist
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/test/integration/vendor/github.com/pelletier/go-toml/v2/.golangci.toml
new file mode 100644
index 000000000..067db5517
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/.golangci.toml
@@ -0,0 +1,84 @@
+[service]
+golangci-lint-version = "1.39.0"
+
+[linters-settings.wsl]
+allow-assign-and-anything = true
+
+[linters-settings.exhaustive]
+default-signifies-exhaustive = true
+
+[linters]
+disable-all = true
+enable = [
+ "asciicheck",
+ "bodyclose",
+ "cyclop",
+ "deadcode",
+ "depguard",
+ "dogsled",
+ "dupl",
+ "durationcheck",
+ "errcheck",
+ "errorlint",
+ "exhaustive",
+ # "exhaustivestruct",
+ "exportloopref",
+ "forbidigo",
+ # "forcetypeassert",
+ "funlen",
+ "gci",
+ # "gochecknoglobals",
+ "gochecknoinits",
+ "gocognit",
+ "goconst",
+ "gocritic",
+ "gocyclo",
+ "godot",
+ "godox",
+ # "goerr113",
+ "gofmt",
+ "gofumpt",
+ "goheader",
+ "goimports",
+ "golint",
+ "gomnd",
+ # "gomoddirectives",
+ "gomodguard",
+ "goprintffuncname",
+ "gosec",
+ "gosimple",
+ "govet",
+ # "ifshort",
+ "importas",
+ "ineffassign",
+ "lll",
+ "makezero",
+ "misspell",
+ "nakedret",
+ "nestif",
+ "nilerr",
+ # "nlreturn",
+ "noctx",
+ "nolintlint",
+ #"paralleltest",
+ "prealloc",
+ "predeclared",
+ "revive",
+ "rowserrcheck",
+ "sqlclosecheck",
+ "staticcheck",
+ "structcheck",
+ "stylecheck",
+ # "testpackage",
+ "thelper",
+ "tparallel",
+ "typecheck",
+ "unconvert",
+ "unparam",
+ "unused",
+ "varcheck",
+ "wastedassign",
+ "whitespace",
+ # "wrapcheck",
+ # "wsl"
+]
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/test/integration/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
new file mode 100644
index 000000000..3aa1840ec
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
@@ -0,0 +1,123 @@
+before:
+ hooks:
+ - go mod tidy
+ - go fmt ./...
+ - go test ./...
+builds:
+ - id: tomll
+ main: ./cmd/tomll
+ binary: tomll
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_arm
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+ - id: tomljson
+ main: ./cmd/tomljson
+ binary: tomljson
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_arm
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+ - id: jsontoml
+ main: ./cmd/jsontoml
+ binary: jsontoml
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_arm
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+universal_binaries:
+ - id: tomll
+ replace: true
+ name_template: tomll
+ - id: tomljson
+ replace: true
+ name_template: tomljson
+ - id: jsontoml
+ replace: true
+ name_template: jsontoml
+archives:
+- id: jsontoml
+ format: tar.xz
+ builds:
+ - jsontoml
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+- id: tomljson
+ format: tar.xz
+ builds:
+ - tomljson
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+- id: tomll
+ format: tar.xz
+ builds:
+ - tomll
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+dockers:
+ - id: tools
+ goos: linux
+ goarch: amd64
+ ids:
+ - jsontoml
+ - tomljson
+ - tomll
+ image_templates:
+ - "ghcr.io/pelletier/go-toml:latest"
+ - "ghcr.io/pelletier/go-toml:{{ .Tag }}"
+ - "ghcr.io/pelletier/go-toml:v{{ .Major }}"
+ skip_push: false
+checksum:
+ name_template: 'sha256sums.txt'
+snapshot:
+ name_template: "{{ incpatch .Version }}-next"
+release:
+ github:
+ owner: pelletier
+ name: go-toml
+ draft: true
+ prerelease: auto
+ mode: replace
+changelog:
+ use: github-native
+announce:
+ skip: true
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/test/integration/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
new file mode 100644
index 000000000..04dd12bcb
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
@@ -0,0 +1,196 @@
+# Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use and efficient TOML
+implementation for Go that gets the job done and gets out of your way – dealing
+with TOML is probably not the central piece of your project.
+
+As the single maintainer of go-toml, time is scarce. All help, big or small, is
+more than welcomed!
+
+## Ask questions
+
+Any question you may have, somebody else might have it too. Always feel free to
+ask them on the [discussion board][discussions]. We will try to answer them as
+clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug. Don't hesitate and ask
+away!
+
+[discussions]: https://github.com/pelletier/go-toml/discussions
+
+## Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to improve
+the documentation. Fix a typo, clarify an interface, add an example, anything
+goes!
+
+The documentation is present in the [README][readme] and thorough the source
+code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
+to the documentation, create a pull request with your proposed changes. For
+simple changes like that, the easiest way to go is probably the "Fork this
+project and edit the file" button on Github, displayed at the top right of the
+file. Unless it's a trivial change (for example a typo), provide a little bit of
+context in your pull request description or commit message.
+
+## Report a bug
+
+Found a bug! Sorry to hear that :(. Help us and others track them down and fix them by
+reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on what to
+include. When in doubt: add more details! By reducing ambiguity and providing
+more information, it decreases back and forth and saves everyone time.
+
+## Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+- A short proposal with some POC code is better than a lengthy piece of text
+ with no code. Code speaks louder than words. That being said, bigger changes
+ should probably start with a [discussion][discussions].
+- No backward-incompatible patch will be accepted unless discussed. Sometimes
+ it's hard, but we try not to break people's programs unless we absolutely have
+ to.
+- If you are writing a new feature or extending an existing one, make sure to
+ write some documentation.
+- Bug fixes need to be accompanied with regression tests.
+- New code needs to be tested.
+- Your commit messages need to explain why the change is needed, even if already
+ included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time overall
+and continuously improve the quality of the project, which is something everyone
+benefits from.
+
+### Get started
+
+The fairly standard code contribution process looks like that:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request]
+4. Review, potentially ask for changes.
+5. Merge.
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test -race ./...`.
+
+During the pull request process, all tests will be run on Linux, Windows, and
+MacOS on the last two versions of Go.
+
+However, given GitHub's new policy to _not_ run Actions on pull requests until a
+maintainer clicks on a button, it is highly recommended that you run them locally
+as you make changes.
+
+### Check coverage
+
+We use `go tool cover` to compute test coverage. Most code editors have a way to
+run and display code coverage, but at the end of the day, we do this:
+
+```
+go test -covermode=atomic -coverprofile=coverage.out
+go tool cover -func=coverage.out
+```
+
+and verify that the overall percentage of tested code does not go down. This is
+a requirement. As a rule of thumb, all lines of code touched by your changes
+should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
+code lowers the coverage.
+
+### Verify performance
+
+Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
+builtin benchmark systems. Because of their noisy nature, containers provided by
+Github Actions cannot be reliably used for benchmarking. As a result, you are
+responsible for checking that your changes do not incur a performance penalty.
+You can run the following to execute benchmarks:
+
+```
+go test ./... -bench=. -count=10
+```
+
+Benchmark results should be compared against each other with
+[benchstat][benchstat]. Typical flow looks like this:
+
+1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
+ a file (for example `old.txt`).
+2. Make some code changes.
+3. Run `go test ....` again, and save the output to an other file (for example
+ `new.txt`).
+4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
+ test.
+
+On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
+performance.
+
+It is highly encouraged to add the benchstat results to your pull request
+description. Pull requests that lower performance will receive more scrutiny.
+
+[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
+
+### Style
+
+Try to look around and follow the same format and structure as the rest of the
+code. We enforce using `go fmt` on the whole code base.
+
+---
+
+## Maintainers-only
+
+### Merge pull request
+
+Checklist:
+
+- Passing CI.
+- Does not introduce backward-incompatible changes (unless discussed).
+- Has relevant doc changes.
+- Benchstat does not show performance regression.
+- Pull request is [labeled appropriately][pr-labels].
+- Title will be understandable in the changelog.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+ nice and clean.
+3. Make sure the commit title is clear and contains the PR number (#123).
+
+### New release
+
+1. Decide on the next version number. Use semver.
+2. Generate release notes using [`gh`][gh]. Example:
+```
+$ gh api -X POST \
+ -F tag_name='v2.0.0-beta.5' \
+ -F target_commitish='v2' \
+ -F previous_tag_name='v2.0.0-beta.4' \
+ --jq '.body' \
+ repos/pelletier/go-toml/releases/generate-notes
+```
+3. Look for "Other changes". That would indicate a pull request not labeled
+ properly. Tweak labels and pull request titles until changelog looks good for
+ users.
+4. [Draft new release][new-release].
+5. Fill tag and target with the same value used to generate the changelog.
+6. Set title to the new tag value.
+7. Paste the generated changelog.
+8. Check "create discussion", in the "Releases" category.
+9. Check pre-release if new version is an alpha or beta.
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[gh]: https://github.com/cli/cli
+[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/test/integration/vendor/github.com/pelletier/go-toml/v2/Dockerfile
new file mode 100644
index 000000000..b9e933237
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/Dockerfile
@@ -0,0 +1,5 @@
+# Minimal distribution image: ships only the three go-toml CLI binaries.
+FROM scratch
+# scratch has no default PATH; make the copied tools reachable by name.
+ENV PATH "$PATH:/bin"
+COPY tomll /bin/tomll
+COPY tomljson /bin/tomljson
+COPY jsontoml /bin/jsontoml
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/LICENSE b/test/integration/vendor/github.com/pelletier/go-toml/v2/LICENSE
new file mode 100644
index 000000000..6839d51cd
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md b/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
new file mode 100644
index 000000000..a63c3a796
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
@@ -0,0 +1,552 @@
+# go-toml v2
+
+Go library for the [TOML](https://toml.io/en/) format.
+
+This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
+
+[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
+
+[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
+
+## Documentation
+
+Full API, examples, and implementation notes are available in the Go
+documentation.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml/v2"
+```
+
+See [Modules](#Modules).
+
+## Features
+
+### Stdlib behavior
+
+As much as possible, this library is designed to behave similarly as the
+standard library's `encoding/json`.
+
+### Performance
+
+While go-toml favors usability, it is written with performance in mind. Most
+operations should not be shockingly slow. See [benchmarks](#benchmarks).
+
+### Strict mode
+
+`Decoder` can be set to "strict mode", which makes it error when some parts of
+the TOML document was not present in the target structure. This is a great way
+to check for typos. [See example in the documentation][strict].
+
+[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
+
+### Contextualized errors
+
+When most decoding errors occur, go-toml returns [`DecodeError`][decode-err]),
+which contains a human readable contextualized version of the error. For
+example:
+
+```
+2| key1 = "value1"
+3| key2 = "missing2"
+ | ~~~~ missing field
+4| key3 = "missing3"
+5| key4 = "value4"
+```
+
+[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
+
+### Local date and time support
+
+TOML supports native [local date/times][ldt]. It allows to represent a given
+date, time, or date-time without relation to a timezone or offset. To support
+this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
+[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
+making them convenient yet unambiguous structures for their respective TOML
+representation.
+
+[ldt]: https://toml.io/en/v1.0.0#local-date-time
+[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
+[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
+[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
+
+## Getting started
+
+Given the following struct, let's see how to read it and write it as TOML:
+
+```go
+type MyConfig struct {
+ Version int
+ Name string
+ Tags []string
+}
+```
+
+### Unmarshaling
+
+[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
+content. For example:
+
+```go
+doc := `
+version = 2
+name = "go-toml"
+tags = ["go", "toml"]
+`
+
+var cfg MyConfig
+err := toml.Unmarshal([]byte(doc), &cfg)
+if err != nil {
+ panic(err)
+}
+fmt.Println("version:", cfg.Version)
+fmt.Println("name:", cfg.Name)
+fmt.Println("tags:", cfg.Tags)
+
+// Output:
+// version: 2
+// name: go-toml
+// tags: [go toml]
+```
+
+[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
+
+### Marshaling
+
+[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
+as a TOML document:
+
+```go
+cfg := MyConfig{
+ Version: 2,
+ Name: "go-toml",
+ Tags: []string{"go", "toml"},
+}
+
+b, err := toml.Marshal(cfg)
+if err != nil {
+ panic(err)
+}
+fmt.Println(string(b))
+
+// Output:
+// Version = 2
+// Name = 'go-toml'
+// Tags = ['go', 'toml']
+```
+
+[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
+
+## Benchmarks
+
+Execution time speedup compared to other Go TOML libraries:
+
+
+
+ Benchmark go-toml v1 BurntSushi/toml
+
+
+ Marshal/HugoFrontMatter-2 1.9x 1.9x
+ Marshal/ReferenceFile/map-2 1.7x 1.8x
+ Marshal/ReferenceFile/struct-2 2.2x 2.5x
+ Unmarshal/HugoFrontMatter-2 2.9x 2.9x
+ Unmarshal/ReferenceFile/map-2 2.6x 2.9x
+ Unmarshal/ReferenceFile/struct-2 4.4x 5.3x
+
+
+See more
+The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.
+
+
+
+ Benchmark go-toml v1 BurntSushi/toml
+
+
+ Marshal/SimpleDocument/map-2 1.8x 2.9x
+ Marshal/SimpleDocument/struct-2 2.7x 4.2x
+ Unmarshal/SimpleDocument/map-2 4.5x 3.1x
+ Unmarshal/SimpleDocument/struct-2 6.2x 3.9x
+ UnmarshalDataset/example-2 3.1x 3.5x
+ UnmarshalDataset/code-2 2.3x 3.1x
+ UnmarshalDataset/twitter-2 2.5x 2.6x
+ UnmarshalDataset/citm_catalog-2 2.1x 2.2x
+ UnmarshalDataset/canada-2 1.6x 1.3x
+ UnmarshalDataset/config-2 4.3x 3.2x
+ [Geo mean] 2.7x 2.8x
+
+
+This table can be generated with ./ci.sh benchmark -a -html.
+
+
+## Modules
+
+go-toml uses Go's standard modules system.
+
+Installation instructions:
+
+- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
+ with it automatically.
+- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
+
+In case of trouble: [Go Modules FAQ][mod-faq].
+
+[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+ * `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
+ $ tomljson --help
+ ```
+
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
+ $ jsontoml --help
+ ```
+
+ * `tomll`: Lints and reformats a TOML file.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
+ $ tomll --help
+ ```
+
+### Docker image
+
+Those tools are also available as a [Docker image][docker]. For example, to use
+`tomljson`:
+
+```
+docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
+```
+
+Multiple versions are available on [ghcr.io][docker].
+
+[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
+
+## Migrating from v1
+
+This section describes the differences between v1 and v2, with some pointers on
+how to get the original behavior when possible.
+
+### Decoding / Unmarshal
+
+#### Automatic field name guessing
+
+When unmarshaling to a struct, if a key in the TOML document does not exactly
+match the name of a struct field or any of the `toml`-tagged field, v1 tries
+multiple variations of the key ([code][v1-keys]).
+
+V2 instead does a case-insensitive matching, like `encoding/json`.
+
+This could impact you if you are relying on casing to differentiate two fields,
+and one of them is not using the `toml` struct tag. The recommended solution
+is to be specific about tag names for those fields using the `toml` struct tag.
+
+[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
+
+#### Ignore preexisting value in interface
+
+When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
+element in the interface to decode the object. For example:
+
+```go
+type inner struct {
+ B interface{}
+}
+type doc struct {
+ A interface{}
+}
+
+d := doc{
+ A: inner{
+ B: "Before",
+ },
+}
+
+data := `
+[A]
+B = "After"
+`
+
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v1: %#v\n", d)
+
+// toml v1: main.doc{A:main.inner{B:"After"}}
+```
+
+In this case, field `A` is of type `interface{}`, containing a `inner` struct.
+V1 sees that type and uses it when decoding the object.
+
+When decoding an object into an `interface{}`, V2 instead disregards whatever
+value the `interface{}` may contain and replaces it with a
+`map[string]interface{}`. With the same data structure as above, here is what
+the result looks like:
+
+```go
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v2: %#v\n", d)
+
+// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Values out of array bounds ignored
+
+When decoding into an array, v1 returns an error when the number of elements
+contained in the doc is superior to the capacity of the array. For example:
+
+```go
+type doc struct {
+ A [2]string
+}
+d := doc{}
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println(err)
+
+// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
+```
+
+In the same situation, v2 ignores the last value:
+
+```go
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println("err:", err, "d:", d)
+// err: d: {[one two]}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Support for `toml.Unmarshaler` has been dropped
+
+This method was not widely used, poorly defined, and added a lot of complexity.
+A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
+interface and use strings.
+
+#### Support for `default` struct tag has been dropped
+
+This feature adds complexity and a poorly defined API for an effect that can be
+accomplished outside of the library.
+
+It does not seem like other format parsers in Go support that feature (the
+project referenced in the original ticket #202 has not been updated since 2017).
+Given that go-toml v2 should not touch values not in the document, the same
+effect can be achieved by pre-filling the struct with defaults (libraries like
+[go-defaults][go-defaults] can help). Also, string representation is not well
+defined for all types: it creates issues like #278.
+
+The recommended replacement is pre-filling the struct before unmarshaling.
+
+[go-defaults]: https://github.com/mcuadros/go-defaults
+
+#### `toml.Tree` replacement
+
+This structure was the initial attempt at providing a document model for
+go-toml. It allows manipulating the structure of any document, encoding and
+decoding from their TOML representation. While a more robust feature was
+initially planned in go-toml v2, this has been ultimately [removed from
+scope][nodoc] of this library, with no plan to add it back at the moment. The
+closest equivalent at the moment would be to unmarshal into an `interface{}` and
+use type assertions and/or reflection to manipulate the arbitrary
+structure. However this would fall short of providing all of the TOML features
+such as adding comments and be specific about whitespace.
+
+
+#### `toml.Position` are not retrievable anymore
+
+The API for retrieving the position (line, column) of a specific TOML element do
+not exist anymore. This was done to minimize the amount of concepts introduced
+by the library (query path), and avoid the performance hit related to storing
+positions in the absence of a document model, for a feature that seemed to have
+little use. Errors however have gained more detailed position
+information. Position retrieval seems better fitted for a document model, which
+has been [removed from the scope][nodoc] of go-toml v2 at the moment.
+
+### Encoding / Marshal
+
+#### Default struct fields order
+
+V1 emits struct fields order alphabetically by default. V2 struct fields are
+emitted in order they are defined. For example:
+
+```go
+type S struct {
+ B string
+ A string
+}
+
+data := S{
+ B: "B",
+ A: "A",
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+// Output:
+// v1:
+// A = "A"
+// B = "B"
+
+// v2:
+// B = 'B'
+// A = 'A'
+```
+
+There is no way to make v2 encoder behave like v1. A workaround could be to
+manually sort the fields alphabetically in the struct definition, or generate
+struct types using `reflect.StructOf`.
+
+#### No indentation by default
+
+V1 automatically indents content of tables by default. V2 does not. However the
+same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example:
+
+```go
+data := map[string]interface{}{
+ "table": map[string]string{
+ "key": "value",
+ },
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+buf := bytes.Buffer{}
+enc := tomlv2.NewEncoder(&buf)
+enc.SetIndentTables(true)
+enc.Encode(data)
+fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
+
+// Output:
+// v1:
+//
+// [table]
+// key = "value"
+//
+// v2:
+// [table]
+// key = 'value'
+//
+//
+// v2 Encoder:
+// [table]
+// key = 'value'
+```
+
+[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
+
+#### Keys and strings are single quoted
+
+V1 always uses double quotes (`"`) around strings and keys that cannot be
+represented bare (unquoted). V2 uses single quotes instead by default (`'`),
+unless a character cannot be represented, then falls back to double quotes. As a
+result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not
+useful anymore.
+
+There is no way to make v2 encoder behave like v1.
+
+#### `TextMarshaler` emits as a string, not TOML
+
+Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
+v1. The encoder would append the result to the output directly. In v2 the result
+is wrapped in a string. As a result, this interface cannot be implemented by the
+root object.
+
+There is no way to make v2 encoder behave like v1.
+
+[tm]: https://golang.org/pkg/encoding/#TextMarshaler
+
+#### `Encoder.CompactComments` has been removed
+
+Emitting compact comments is now the default behavior of go-toml. This option
+is not necessary anymore.
+
+#### Struct tags have been merged
+
+V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
+`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
+`toml`, `multiline`, and `omitempty`. For example:
+
+```go
+type doc struct {
+ // v1
+ F string `toml:"field" multiline:"true" omitempty:"true"`
+ // v2
+ F string `toml:"field,multiline,omitempty"`
+}
+```
+
+As a result, the `Encoder.SetTag*` methods have been removed, as there is just
+one tag now.
+
+
+#### `commented` tag has been removed
+
+There is no replacement for the `commented` tag. This feature would be better
+suited in a proper document model for go-toml v2, which has been [cut from
+scope][nodoc] at the moment.
+
+#### `Encoder.ArraysWithOneElementPerLine` has been renamed
+
+The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
+
+#### `Encoder.Indentation` has been renamed
+
+The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
+
+
+#### Embedded structs behave like stdlib
+
+V1 defaults to merging embedded struct fields into the embedding struct. This
+behavior was unexpected because it does not follow the standard library. To
+avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
+added to make the encoder behave correctly. Given backward compatibility is not
+a problem anymore, v2 does the right thing by default: it follows the behavior
+of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
+
+[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
+
+### `query`
+
+go-toml v1 provided the [`go-toml/query`][query] package. It allowed to run
+JSONPath-style queries on TOML files. This feature is not available in v2. For a
+replacement, check out [dasel][dasel].
+
+This package has been removed because it was essentially not supported anymore
+(last commit May 2020), increased the complexity of the code base, and more
+complete solutions exist out there.
+
+[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
+[dasel]: https://github.com/TomWright/dasel
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/test/integration/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
new file mode 100644
index 000000000..b2f21cfc9
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+Use this section to tell people about which versions of your project are
+currently being supported with security updates.
+
+| Version | Supported |
+| ---------- | ------------------ |
+| Latest 2.x | :white_check_mark: |
+| All 1.x | :x: |
+| All 0.x | :x: |
+
+## Reporting a Vulnerability
+
+Email a vulnerability report to `security@pelletier.codes`. Make sure to include
+as many details as possible to reproduce the vulnerability. This is a
+side-project: I will try to get back to you as quickly as possible, time
+permitting in my personal life. Providing a working patch helps very much!
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/ci.sh b/test/integration/vendor/github.com/pelletier/go-toml/v2/ci.sh
new file mode 100644
index 000000000..d916c5f23
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/ci.sh
@@ -0,0 +1,279 @@
+#!/usr/bin/env bash
+
+
+# stderr: echo all arguments to standard error (leaves stdout free for data).
+stderr() {
+ echo "$@" 1>&2
+}
+
+# usage: print an error message prefixed with the script name.
+# NOTE(review): this body looks truncated by the patch extraction — the
+# heredoc after `cat 1>&2 <` is missing, and the lines that follow
+# (coverage.out processing, popd, worktree cleanup) appear to be the tail
+# of a separate `cover` function whose header was lost. Verify against the
+# upstream ci.sh before relying on this text.
+usage() {
+  b=$(basename "$0")
+  echo $b: ERROR: "$@" 1>&2
+
+  cat 1>&2 < coverage.out
+  go tool cover -func=coverage.out
+  popd
+
+  if [ "${branch}" != "HEAD" ]; then
+    git worktree remove --force "$dir"
+  fi
+}
+
+# coverage: run test coverage for a branch (default HEAD).
+# With `-d <branch>`, compare coverage of HEAD against <branch> and fail
+# (return 1) when total coverage regressed, printing a per-function diff.
+coverage() {
+    case "$1" in
+    -d)
+        shift
+        target="${1?Need to provide a target branch argument}"
+
+        output_dir="$(mktemp -d)"
+        target_out="${output_dir}/target.txt"
+        head_out="${output_dir}/head.txt"
+
+        # `cover` is presumably defined earlier in this script (its header is
+        # garbled in this patch view); it emits `go tool cover -func` output.
+        cover "${target}" > "${target_out}"
+        cover "HEAD" > "${head_out}"
+
+        cat "${target_out}"
+        cat "${head_out}"
+
+        echo ""
+
+        # Extract the "total" percentage line from each report.
+        target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
+        head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
+        echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
+
+        delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
+        echo "Delta: ${delta_pct}"
+
+        # A leading minus sign on the delta means coverage went down.
+        if [[ $delta_pct = \-* ]]; then
+          echo "Regression!";
+
+          target_diff="${output_dir}/target.diff.txt"
+          head_diff="${output_dir}/head.diff.txt"
+          # Normalize both reports to "function coverage" pairs for diffing.
+          cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
+          cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
+
+          diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
+          return 1
+        fi
+        return 0
+        ;;
+    esac
+
+    cover "${1-HEAD}"
+}
+
+# bench: run the marshal/unmarshal benchmarks for a given branch.
+#   $1 branch to benchmark ("HEAD" uses the working copy as-is)
+#   $2 file to tee benchmark output into
+#   $3 optional module path to substitute for go-toml/v2 in ./benchmark/,
+#      used to benchmark competing libraries against the same suite
+bench() {
+    branch="${1}"
+    out="${2}"
+    replace="${3}"
+    dir="$(mktemp -d)"
+
+    stderr "Executing benchmark for ${branch} at ${dir}"
+
+    if [ "${branch}" = "HEAD" ]; then
+    	cp -r . "${dir}/"
+    else
+    	git worktree add "$dir" "$branch"
+    fi
+
+    pushd "$dir"
+
+    if [ "${replace}" != "" ]; then
+        find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
+        go get "${replace}"
+    fi
+
+    # Pin to two CPUs at high priority for more stable numbers.
+    export GOMAXPROCS=2
+    nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}"
+    popd
+
+    if [ "${branch}" != "HEAD" ]; then
+    	git worktree remove --force "$dir"
+    fi
+}
+
+# fmktemp: portable mktemp with a name hint — GNU mktemp takes --suffix,
+# BSD/macOS mktemp takes -t instead.
+fmktemp() {
+    if mktemp --version|grep GNU >/dev/null; then
+        mktemp --suffix=-$1;
+    else
+        mktemp -t $1;
+    fi
+}
+
+# benchstathtml: turn a benchstat CSV file ($1) into the HTML speedup tables
+# embedded in the README, via an inline python3 heredoc.
+# NOTE(review): the heredoc below is garbled in this patch extraction — some
+# lines (e.g. the closing `""")` lines) lost their leading `+` diff marker and
+# the emitted HTML tags appear to have been stripped. Verify against the
+# upstream ci.sh before relying on this text.
+benchstathtml() {
+python3 - $1 <<'EOF'
+import sys
+
+lines = []
+stop = False
+
+with open(sys.argv[1]) as f:
+    for line in f.readlines():
+        line = line.strip()
+        if line == "":
+            stop = True
+        if not stop:
+            lines.append(line.split(','))
+
+results = []
+for line in reversed(lines[1:]):
+    v2 = float(line[1])
+    results.append([
+        line[0].replace("-32", ""),
+        "%.1fx" % (float(line[3])/v2),  # v1
+        "%.1fx" % (float(line[5])/v2),  # bs
+    ])
+# move geomean to the end
+results.append(results[0])
+del results[0]
+
+
+def printtable(data):
+    print("""
+
+
+ Benchmark go-toml v1 BurntSushi/toml
+
+ """)
+
+    for r in data:
+        print("    {}  {}  {} ".format(*r))
+
+    print("""
+
""")
+
+
+def match(x):
+    return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]
+
+above = [x for x in results if match(x)]
+below = [x for x in results if not match(x)]
+
+printtable(above)
+print("See more ")
+print("""The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.
""")
+printtable(below)
+print('This table can be generated with ./ci.sh benchmark -a -html.
')
+print(" ")
+
+EOF
+}
+
+# benchmark: front-end for the bench helper.
+#   -d <branch>  compare HEAD against <branch> with benchstat
+#   -a [-html]   benchmark v2 against go-toml v1 and BurntSushi/toml,
+#                optionally rendering the README HTML tables
+#   <branch>     (default HEAD) run the suite once for that branch
+benchmark() {
+    case "$1" in
+    -d)
+        shift
+        target="${1?Need to provide a target branch argument}"
+
+        old=`fmktemp ${target}`
+        bench "${target}" "${old}"
+
+        new=`fmktemp HEAD`
+        bench HEAD "${new}"
+
+        benchstat "${old}" "${new}"
+        return 0
+        ;;
+    -a)
+        shift
+
+        # Run the same benchmark suite three times, swapping the module under
+        # test via bench's third argument.
+        v2stats=`fmktemp go-toml-v2`
+        bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2"
+        v1stats=`fmktemp go-toml-v1`
+        bench HEAD "${v1stats}" "github.com/pelletier/go-toml"
+        bsstats=`fmktemp bs-toml`
+        bench HEAD "${bsstats}" "github.com/BurntSushi/toml"
+
+        cp "${v2stats}" go-toml-v2.txt
+        cp "${v1stats}" go-toml-v1.txt
+        cp "${bsstats}" bs-toml.txt
+
+        if [ "$1" = "-html" ]; then
+        	tmpcsv=`fmktemp csv`
+        	benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
+        	benchstathtml $tmpcsv
+        else
+        	benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt
+        fi
+
+        rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
+        return $?
+    esac
+
+    bench "${1-HEAD}" `mktemp`
+}
+
+# Entry point: dispatch the first argument to a sub-command, passing the rest
+# of the command line through; unknown commands print usage and exit.
+case "$1" in
+    coverage) shift; coverage $@;;
+    benchmark) shift; benchmark $@;;
+    *) usage "bad argument $1";;
+esac
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
new file mode 100644
index 000000000..4af965360
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
@@ -0,0 +1,544 @@
+package toml
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// parseInteger parses a TOML integer literal in any supported base. A leading
+// "0x", "0b", or "0o" prefix dispatches to the hexadecimal, binary, or octal
+// parser; everything else is treated as decimal.
+func parseInteger(b []byte) (int64, error) {
+	if len(b) > 2 && b[0] == '0' {
+		switch b[1] {
+		case 'x':
+			return parseIntHex(b)
+		case 'b':
+			return parseIntBin(b)
+		case 'o':
+			return parseIntOct(b)
+		default:
+			// The scanner is expected to have rejected any other base
+			// prefix before this function is reached.
+			panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
+		}
+	}
+
+	return parseIntDec(b)
+}
+
+// parseLocalDate parses a full-date byte slice (YYYY-MM-DD) into a LocalDate,
+// checking both the textual layout and that the date is actually possible.
+func parseLocalDate(b []byte) (LocalDate, error) {
+	// full-date      = date-fullyear "-" date-month "-" date-mday
+	// date-fullyear  = 4DIGIT
+	// date-month     = 2DIGIT  ; 01-12
+	// date-mday      = 2DIGIT  ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	var date LocalDate
+
+	// Exactly 10 bytes with dashes at fixed positions 4 and 7.
+	if len(b) != 10 || b[4] != '-' || b[7] != '-' {
+		return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD")
+	}
+
+	var err error
+
+	date.Year, err = parseDecimalDigits(b[0:4])
+	if err != nil {
+		return LocalDate{}, err
+	}
+
+	date.Month, err = parseDecimalDigits(b[5:7])
+	if err != nil {
+		return LocalDate{}, err
+	}
+
+	date.Day, err = parseDecimalDigits(b[8:10])
+	if err != nil {
+		return LocalDate{}, err
+	}
+
+	// isValidDate is declared elsewhere in this package; presumably it
+	// rejects calendar-impossible combinations (e.g. February 30).
+	if !isValidDate(date.Year, date.Month, date.Day) {
+		return LocalDate{}, newDecodeError(b, "impossible date")
+	}
+
+	return date, nil
+}
+
+// parseDecimalDigits converts a run of ASCII digits into an int. Any
+// non-digit byte produces a DecodeError pointing at that byte. Overflow is
+// not checked; callers in this file only pass short fixed-width fields
+// (2-4 digits).
+func parseDecimalDigits(b []byte) (int, error) {
+	v := 0
+
+	for i, c := range b {
+		if c < '0' || c > '9' {
+			return 0, newDecodeError(b[i:i+1], "expected digit (0-9)")
+		}
+		v *= 10
+		v += int(c - '0')
+	}
+
+	return v, nil
+}
+
+// parseDateTime parses an offset-date-time: a local date-time followed by a
+// mandatory timezone, either "Z"/"z" (UTC) or a numeric ±HH:MM offset. The
+// result is a time.Time in the corresponding fixed zone.
+func parseDateTime(b []byte) (time.Time, error) {
+	// offset-date-time = full-date time-delim full-time
+	// full-time      = partial-time time-offset
+	// time-offset    = "Z" / time-numoffset
+	// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
+
+	dt, b, err := parseLocalDateTime(b)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	var zone *time.Location
+
+	if len(b) == 0 {
+		// parser should have checked that when assigning the date time node
+		panic("date time should have a timezone")
+	}
+
+	if b[0] == 'Z' || b[0] == 'z' {
+		b = b[1:]
+		zone = time.UTC
+	} else {
+		// Numeric offset: exactly "+HH:MM" or "-HH:MM" (6 bytes).
+		const dateTimeByteLen = 6
+		if len(b) != dateTimeByteLen {
+			return time.Time{}, newDecodeError(b, "invalid date-time timezone")
+		}
+		var direction int
+		switch b[0] {
+		case '-':
+			direction = -1
+		case '+':
+			direction = +1
+		default:
+			return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character")
+		}
+
+		if b[3] != ':' {
+			return time.Time{}, newDecodeError(b[3:4], "expected a : separator")
+		}
+
+		hours, err := parseDecimalDigits(b[1:3])
+		if err != nil {
+			return time.Time{}, err
+		}
+		if hours > 23 {
+			return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours")
+		}
+
+		minutes, err := parseDecimalDigits(b[4:6])
+		if err != nil {
+			return time.Time{}, err
+		}
+		if minutes > 59 {
+			return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes")
+		}
+
+		// Convert the signed offset to seconds; a zero offset is
+		// canonicalized to time.UTC rather than a fixed zone.
+		seconds := direction * (hours*3600 + minutes*60)
+		if seconds == 0 {
+			zone = time.UTC
+		} else {
+			zone = time.FixedZone("", seconds)
+		}
+		b = b[dateTimeByteLen:]
+	}
+
+	if len(b) > 0 {
+		return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone")
+	}
+
+	t := time.Date(
+		dt.Year,
+		time.Month(dt.Month),
+		dt.Day,
+		dt.Hour,
+		dt.Minute,
+		dt.Second,
+		dt.Nanosecond,
+		zone)
+
+	return t, nil
+}
+
+// parseLocalDateTime parses a local date-time (date, "T"/"t"/space separator,
+// then time). It returns the parsed value together with any remaining bytes,
+// which parseDateTime consumes as a timezone suffix.
+func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
+	var dt LocalDateTime
+
+	// 10 bytes of date + 1 separator; the time part is validated below.
+	const localDateTimeByteMinLen = 11
+	if len(b) < localDateTimeByteMinLen {
+		return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
+	}
+
+	date, err := parseLocalDate(b[:10])
+	if err != nil {
+		return dt, nil, err
+	}
+	dt.LocalDate = date
+
+	sep := b[10]
+	if sep != 'T' && sep != ' ' && sep != 't' {
+		return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space")
+	}
+
+	t, rest, err := parseLocalTime(b[11:])
+	if err != nil {
+		return dt, nil, err
+	}
+	dt.LocalTime = t
+
+	return dt, rest, nil
+}
+
+// parseLocalTime is a bit different because it also returns the remaining
+// []byte that is didn't need. This is to allow parseDateTime to parse those
+// remaining bytes as a timezone.
+//
+// It accepts HH:MM:SS with an optional fractional-second suffix. Fractions
+// beyond 9 digits are consumed but truncated to nanosecond precision.
+func parseLocalTime(b []byte) (LocalTime, []byte, error) {
+	var (
+		// nspow[p] converts a p-digit fraction into nanoseconds.
+		nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
+		t     LocalTime
+	)
+
+	// check if b matches to have expected format HH:MM:SS[.NNNNNN]
+	const localTimeByteLen = 8
+	if len(b) < localTimeByteLen {
+		return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
+	}
+
+	var err error
+
+	t.Hour, err = parseDecimalDigits(b[0:2])
+	if err != nil {
+		return t, nil, err
+	}
+
+	if t.Hour > 23 {
+		return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23")
+	}
+	if b[2] != ':' {
+		return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes")
+	}
+
+	t.Minute, err = parseDecimalDigits(b[3:5])
+	if err != nil {
+		return t, nil, err
+	}
+	if t.Minute > 59 {
+		return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59")
+	}
+	if b[5] != ':' {
+		return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds")
+	}
+
+	t.Second, err = parseDecimalDigits(b[6:8])
+	if err != nil {
+		return t, nil, err
+	}
+
+	// 60 is allowed to accommodate leap seconds in the input.
+	if t.Second > 60 {
+		return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60")
+	}
+
+	b = b[8:]
+
+	if len(b) >= 1 && b[0] == '.' {
+		frac := 0
+		precision := 0
+		digits := 0
+
+		for i, c := range b[1:] {
+			if !isDigit(c) {
+				if i == 0 {
+					return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point")
+				}
+				break
+			}
+			digits++
+
+			const maxFracPrecision = 9
+			if i >= maxFracPrecision {
+				// go-toml allows decoding fractional seconds
+				// beyond the supported precision of 9
+				// digits. It truncates the fractional component
+				// to the supported precision and ignores the
+				// remaining digits.
+				//
+				// https://github.com/pelletier/go-toml/discussions/707
+				continue
+			}
+
+			frac *= 10
+			frac += int(c - '0')
+			precision++
+		}
+
+		if precision == 0 {
+			return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit")
+		}
+
+		t.Nanosecond = frac * nspow[precision]
+		t.Precision = precision
+
+		// Skip the dot plus every digit consumed, including truncated ones.
+		return t, b[1+digits:], nil
+	}
+	return t, b, nil
+}
+
+// parseFloat parses a TOML float literal: an optional sign, "nan"
+// (handled explicitly below; inf and signed nan/inf presumably reach
+// strconv.ParseFloat), underscores as digit separators, and TOML-specific
+// restrictions on where the decimal point and leading zeroes may appear.
+//nolint:cyclop
+func parseFloat(b []byte) (float64, error) {
+	// "+nan" / "-nan": strconv does not accept a signed NaN, so special-case it.
+	if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
+		return math.NaN(), nil
+	}
+
+	cleaned, err := checkAndRemoveUnderscoresFloats(b)
+	if err != nil {
+		return 0, err
+	}
+
+	if cleaned[0] == '.' {
+		return 0, newDecodeError(b, "float cannot start with a dot")
+	}
+
+	if cleaned[len(cleaned)-1] == '.' {
+		return 0, newDecodeError(b, "float cannot end with a dot")
+	}
+
+	// TOML requires digits on both sides of the decimal point and at most
+	// one point; strconv is more permissive, so enforce it here.
+	dotAlreadySeen := false
+	for i, c := range cleaned {
+		if c == '.' {
+			if dotAlreadySeen {
+				return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point")
+			}
+			if !isDigit(cleaned[i-1]) {
+				return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit")
+			}
+			if !isDigit(cleaned[i+1]) {
+				return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit")
+			}
+			dotAlreadySeen = true
+		}
+	}
+
+	start := 0
+	if cleaned[0] == '+' || cleaned[0] == '-' {
+		start = 1
+	}
+	if cleaned[start] == '0' && isDigit(cleaned[start+1]) {
+		return 0, newDecodeError(b, "float integer part cannot have leading zeroes")
+	}
+
+	f, err := strconv.ParseFloat(string(cleaned), 64)
+	if err != nil {
+		return 0, newDecodeError(b, "unable to parse float: %w", err)
+	}
+
+	return f, nil
+}
+
+// parseIntHex parses a "0x"-prefixed hexadecimal integer, after validating
+// and stripping underscore digit separators.
+func parseIntHex(b []byte) (int64, error) {
+	// b[2:] skips the "0x" prefix checked by parseInteger.
+	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := strconv.ParseInt(string(cleaned), 16, 64)
+	if err != nil {
+		return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err)
+	}
+
+	return i, nil
+}
+
+// parseIntOct parses a "0o"-prefixed octal integer, after validating and
+// stripping underscore digit separators.
+func parseIntOct(b []byte) (int64, error) {
+	// b[2:] skips the "0o" prefix checked by parseInteger.
+	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := strconv.ParseInt(string(cleaned), 8, 64)
+	if err != nil {
+		return 0, newDecodeError(b, "couldn't parse octal number: %w", err)
+	}
+
+	return i, nil
+}
+
+func parseIntBin(b []byte) (int64, error) {
+ cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := strconv.ParseInt(string(cleaned), 2, 64)
+ if err != nil {
+ return 0, newDecodeError(b, "couldn't parse binary number: %w", err)
+ }
+
+ return i, nil
+}
+
+func isSign(b byte) bool {
+ return b == '+' || b == '-'
+}
+
+// parseIntDec parses a decimal TOML integer, allowing an optional leading
+// sign and rejecting leading zeroes (e.g. "042") as required by the TOML
+// specification.
+func parseIntDec(b []byte) (int64, error) {
+	cleaned, err := checkAndRemoveUnderscoresIntegers(b)
+	if err != nil {
+		return 0, err
+	}
+
+	startIdx := 0
+
+	if isSign(cleaned[0]) {
+		startIdx++
+	}
+
+	// A multi-digit number must not begin with '0'; a lone "0" is fine.
+	if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
+		return 0, newDecodeError(b, "leading zero not allowed on decimal number")
+	}
+
+	i, err := strconv.ParseInt(string(cleaned), 10, 64)
+	if err != nil {
+		return 0, newDecodeError(b, "couldn't parse decimal number: %w", err)
+	}
+
+	return i, nil
+}
+
+// checkAndRemoveUnderscoresIntegers validates underscore placement in an
+// integer literal (optionally signed) and returns the literal with
+// underscores removed. An underscore must not appear at either end of the
+// digits and must have a digit on both sides. When the input contains no
+// underscore, b is returned as-is without allocating.
+func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
+	start := 0
+	if b[start] == '+' || b[start] == '-' {
+		start++
+	}
+
+	if len(b) == start {
+		return b, nil
+	}
+
+	if b[start] == '_' {
+		return nil, newDecodeError(b[start:start+1], "number cannot start with underscore")
+	}
+
+	if b[len(b)-1] == '_' {
+		return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
+	}
+
+	// fast path
+	i := 0
+	for ; i < len(b); i++ {
+		if b[i] == '_' {
+			break
+		}
+	}
+	if i == len(b) {
+		return b, nil
+	}
+
+	before := false
+	// Seed cleaned with the underscore-free prefix: copy transfers
+	// min(len) = i bytes, i.e. everything before the first underscore.
+	cleaned := make([]byte, i, len(b))
+	copy(cleaned, b)
+
+	for i++; i < len(b); i++ {
+		c := b[i]
+		if c == '_' {
+			if !before {
+				return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
+			}
+			before = false
+		} else {
+			before = true
+			cleaned = append(cleaned, c)
+		}
+	}
+
+	return cleaned, nil
+}
+
+// checkAndRemoveUnderscoresFloats validates underscore placement in a
+// float literal and returns the literal with underscores removed.
+// Underscores need a digit on each side and may not sit next to the
+// exponent marker or the decimal point. When the input contains no
+// underscore, b is returned as-is without allocating.
+func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
+	if b[0] == '_' {
+		return nil, newDecodeError(b[0:1], "number cannot start with underscore")
+	}
+
+	if b[len(b)-1] == '_' {
+		return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
+	}
+
+	// fast path
+	i := 0
+	for ; i < len(b); i++ {
+		if b[i] == '_' {
+			break
+		}
+	}
+	if i == len(b) {
+		return b, nil
+	}
+
+	// before tracks whether the previous byte was a digit, so that each
+	// underscore can be checked for a digit on its left.
+	before := false
+	cleaned := make([]byte, 0, len(b))
+
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		switch c {
+		case '_':
+			if !before {
+				return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
+			}
+			if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
+				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent")
+			}
+			before = false
+		case '+', '-':
+			// signed exponents
+			cleaned = append(cleaned, c)
+			before = false
+		case 'e', 'E':
+			if i < len(b)-1 && b[i+1] == '_' {
+				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent")
+			}
+			cleaned = append(cleaned, c)
+		case '.':
+			if i < len(b)-1 && b[i+1] == '_' {
+				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point")
+			}
+			if i > 0 && b[i-1] == '_' {
+				return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point")
+			}
+			cleaned = append(cleaned, c)
+		default:
+			before = true
+			cleaned = append(cleaned, c)
+		}
+	}
+
+	return cleaned, nil
+}
+
+// isValidDate checks if a provided date is a date that exists.
+// month is 1-based (January = 1).
+func isValidDate(year int, month int, day int) bool {
+	return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
+}
+
+// daysBefore[m] counts the number of days in a non-leap year
+// before month m begins. There is an entry for m=12, counting
+// the number of days before January of next year (365).
+var daysBefore = [...]int32{
+	0,
+	31,
+	31 + 28,
+	31 + 28 + 31,
+	31 + 28 + 31 + 30,
+	31 + 28 + 31 + 30 + 31,
+	31 + 28 + 31 + 30 + 31 + 30,
+	31 + 28 + 31 + 30 + 31 + 30 + 31,
+	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
+	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
+	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
+	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
+	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
+}
+
+// daysIn returns the number of days in month m (1-based) of the given
+// year, accounting for leap-year February.
+func daysIn(m int, year int) int {
+	if m == 2 && isLeap(year) {
+		return 29
+	}
+	return int(daysBefore[m] - daysBefore[m-1])
+}
+
+// isLeap reports whether year is a leap year in the Gregorian calendar.
+func isLeap(year int) bool {
+	return year%4 == 0 && (year%100 != 0 || year%400 == 0)
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/doc.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/doc.go
new file mode 100644
index 000000000..b7bc599bd
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/doc.go
@@ -0,0 +1,2 @@
+// Package toml is a library to read and write TOML documents.
+package toml
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
new file mode 100644
index 000000000..2e7f0ffdf
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
@@ -0,0 +1,270 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// DecodeError represents an error encountered during the parsing or decoding
+// of a TOML document.
+//
+// In addition to the error message, it contains the position in the document
+// where it happened, as well as a human-readable representation that shows
+// where the error occurred in the document.
+type DecodeError struct {
+	message string
+	line    int
+	column  int
+	key     Key
+
+	human string
+}
+
+// StrictMissingError occurs in a TOML document that does not have a
+// corresponding field in the target value. It contains all the missing fields
+// in Errors.
+//
+// Emitted by Decoder when DisallowUnknownFields() was called.
+type StrictMissingError struct {
+	// One error per field that could not be found.
+	Errors []DecodeError
+}
+
+// Error returns the canonical string for this error.
+func (s *StrictMissingError) Error() string {
+	return "strict mode: fields in the document are missing in the target struct"
+}
+
+// String returns a human readable description of all errors.
+func (s *StrictMissingError) String() string {
+	var buf strings.Builder
+
+	for i, e := range s.Errors {
+		if i > 0 {
+			buf.WriteString("\n---\n")
+		}
+
+		buf.WriteString(e.String())
+	}
+
+	return buf.String()
+}
+
+// Key represents a (possibly dotted) sequence of simple keys pointing to
+// a value inside a TOML document.
+type Key []string
+
+// internal version of DecodeError that is used as the base to create a
+// DecodeError with full context.
+type decodeError struct {
+	highlight []byte
+	message   string
+	key       Key // optional
+}
+
+// Error implements the error interface; it returns the bare message
+// without position information or context.
+func (de *decodeError) Error() string {
+	return de.message
+}
+
+// newDecodeError builds a decodeError. highlight must be a sub-slice of
+// the document being parsed; format and args follow fmt.Errorf semantics.
+func newDecodeError(highlight []byte, format string, args ...interface{}) error {
+	return &decodeError{
+		highlight: highlight,
+		message:   fmt.Errorf(format, args...).Error(),
+	}
+}
+
+// Error returns the error message contained in the DecodeError.
+func (e *DecodeError) Error() string {
+	return "toml: " + e.message
+}
+
+// String returns the human-readable contextualized error. This string is multi-line.
+func (e *DecodeError) String() string {
+	return e.human
+}
+
+// Position returns the (line, column) pair indicating where the error
+// occurred in the document. Positions are 1-indexed.
+func (e *DecodeError) Position() (row int, column int) {
+	return e.line, e.column
+}
+
+// Key that was being processed when the error occurred. The key is present only
+// if this DecodeError is part of a StrictMissingError.
+func (e *DecodeError) Key() Key {
+	return e.key
+}
+
+// wrapDecodeError creates a DecodeError referencing a highlighted
+// range of bytes from document.
+//
+// highlight needs to be a sub-slice of document, or this function panics.
+//
+// The function copies all bytes used in DecodeError, so that document and
+// highlight can be freely deallocated.
+//
+//nolint:funlen
+func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
+	offset := danger.SubsliceOffset(document, de.highlight)
+
+	errMessage := de.Error()
+	errLine, errColumn := positionAtEnd(document[:offset])
+	before, after := linesOfContext(document, de.highlight, offset, 3)
+
+	var buf strings.Builder
+
+	// Width of the line-number gutter, sized for the largest line shown.
+	maxLine := errLine + len(after) - 1
+	lineColumnWidth := len(strconv.Itoa(maxLine))
+
+	// Write the lines of context strictly before the error.
+	// before is ordered nearest-first; index 0 is the start of the error
+	// line itself and is written further below, hence i > 0.
+	for i := len(before) - 1; i > 0; i-- {
+		line := errLine - i
+		buf.WriteString(formatLineNumber(line, lineColumnWidth))
+		buf.WriteString("|")
+
+		if len(before[i]) > 0 {
+			buf.WriteString(" ")
+			buf.Write(before[i])
+		}
+
+		buf.WriteRune('\n')
+	}
+
+	// Write the document line that contains the error.
+
+	buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
+	buf.WriteString("| ")
+
+	if len(before) > 0 {
+		buf.Write(before[0])
+	}
+
+	buf.Write(de.highlight)
+
+	if len(after) > 0 {
+		buf.Write(after[0])
+	}
+
+	buf.WriteRune('\n')
+
+	// Write the line with the error message itself (so it does not have a line
+	// number).
+
+	buf.WriteString(strings.Repeat(" ", lineColumnWidth))
+	buf.WriteString("| ")
+
+	if len(before) > 0 {
+		buf.WriteString(strings.Repeat(" ", len(before[0])))
+	}
+
+	buf.WriteString(strings.Repeat("~", len(de.highlight)))
+
+	if len(errMessage) > 0 {
+		buf.WriteString(" ")
+		buf.WriteString(errMessage)
+	}
+
+	// Write the lines of context strictly after the error.
+
+	for i := 1; i < len(after); i++ {
+		buf.WriteRune('\n')
+		line := errLine + i
+		buf.WriteString(formatLineNumber(line, lineColumnWidth))
+		buf.WriteString("|")
+
+		if len(after[i]) > 0 {
+			buf.WriteString(" ")
+			buf.Write(after[i])
+		}
+	}
+
+	return &DecodeError{
+		message: errMessage,
+		line:    errLine,
+		column:  errColumn,
+		key:     de.key,
+		human:   buf.String(),
+	}
+}
+
+// formatLineNumber renders line right-aligned in a field of the given
+// width so gutter line numbers align in the context display.
+func formatLineNumber(line int, width int) string {
+	format := "%" + strconv.Itoa(width) + "d"
+
+	return fmt.Sprintf(format, line)
+}
+
+// linesOfContext returns the lines surrounding the highlight. The first
+// slice is ordered from the error line backward, the second forward;
+// index 0 of each holds the partial error line itself.
+func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
+	return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
+}
+
+// beforeLines returns up to linesAround+1 slices: element 0 is the part
+// of the error line before the highlight, followed by the preceding
+// document lines, nearest first.
+func beforeLines(document []byte, offset int, linesAround int) [][]byte {
+	var beforeLines [][]byte
+
+	// Walk the document backward from the highlight to find previous lines
+	// of context.
+	rest := document[:offset]
+backward:
+	for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
+		switch {
+		case rest[o] == '\n':
+			// handle individual lines
+			beforeLines = append(beforeLines, rest[o+1:])
+			rest = rest[:o]
+			o = len(rest) - 1
+		case o == 0:
+			// add the first line only if it's non-empty
+			beforeLines = append(beforeLines, rest)
+
+			break backward
+		default:
+			o--
+		}
+	}
+
+	return beforeLines
+}
+
+// afterLines returns up to linesAround+1 slices: element 0 is the part
+// of the error line after the highlight, followed by the subsequent
+// document lines, nearest first.
+func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
+	var afterLines [][]byte
+
+	// Walk the document forward from the highlight to find the following
+	// lines of context.
+	rest := document[offset+len(highlight):]
+forward:
+	for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
+		switch {
+		case rest[o] == '\n':
+			// handle individual lines
+			afterLines = append(afterLines, rest[:o])
+			rest = rest[o+1:]
+			o = 0
+
+		case o == len(rest)-1:
+			// add last line only if it's non-empty
+			afterLines = append(afterLines, rest)
+
+			break forward
+		default:
+			o++
+		}
+	}
+
+	return afterLines
+}
+
+// positionAtEnd returns the 1-indexed row and column of the position
+// immediately after the last byte of b.
+func positionAtEnd(b []byte) (row int, column int) {
+	row = 1
+	column = 1
+
+	for _, c := range b {
+		if c == '\n' {
+			row++
+			column = 1
+		} else {
+			column++
+		}
+	}
+
+	return
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go
new file mode 100644
index 000000000..9dec2e000
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go
@@ -0,0 +1,144 @@
+package ast
+
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// Iterator starts uninitialized, you need to call Next() first.
+//
+// For example:
+//
+// it := n.Children()
+// for it.Next() {
+// it.Node()
+// }
+type Iterator struct {
+	started bool
+	node    *Node
+}
+
+// Next moves the iterator forward and returns true if points to a
+// node, false otherwise.
+func (c *Iterator) Next() bool {
+	// The first call only checks validity; subsequent calls advance.
+	if !c.started {
+		c.started = true
+	} else if c.node.Valid() {
+		c.node = c.node.Next()
+	}
+	return c.node.Valid()
+}
+
+// IsLast returns true if the current node of the iterator is the last
+// one. Subsequent call to Next() will return false.
+func (c *Iterator) IsLast() bool {
+	return c.node.next == 0
+}
+
+// Node returns the node currently pointed at by the iterator.
+func (c *Iterator) Node() *Node {
+	return c.node
+}
+
+// Root contains a full AST.
+//
+// It is immutable once constructed with Builder.
+type Root struct {
+	nodes []Node
+}
+
+// Iterator over the top level nodes.
+func (r *Root) Iterator() Iterator {
+	it := Iterator{}
+	if len(r.nodes) > 0 {
+		it.node = &r.nodes[0]
+	}
+	return it
+}
+
+// at returns a pointer to the node stored at idx in the backing array.
+func (r *Root) at(idx Reference) *Node {
+	return &r.nodes[idx]
+}
+
+// Arrays have one child per element in the array. InlineTables have
+// one child per key-value pair in the table. KeyValues have at least
+// two children. The first one is the value. The rest make a
+// potentially dotted key. Table and Array table have one child per
+// element of the key they represent (same as KeyValue, but without
+// the last node being the value).
+type Node struct {
+	Kind Kind
+	Raw  Range  // Raw bytes from the input.
+	Data []byte // Node value (either allocated or referencing the input).
+
+	// References to other nodes, as offsets in the backing array
+	// from this node. References can go backward, so those can be
+	// negative.
+	next  int // 0 if last element
+	child int // 0 if no child
+}
+
+// Range locates a span of raw bytes in the original input by offset and
+// length.
+type Range struct {
+	Offset uint32
+	Length uint32
+}
+
+// Next returns a pointer to the next sibling node, or nil if there
+// is none.
+func (n *Node) Next() *Node {
+	if n.next == 0 {
+		return nil
+	}
+	// Siblings live in the same backing array; n.next is a relative
+	// (possibly negative) element offset from n.
+	ptr := unsafe.Pointer(n)
+	size := unsafe.Sizeof(Node{})
+	return (*Node)(danger.Stride(ptr, size, n.next))
+}
+
+// Child returns a pointer to the first child node of this node. Other
+// children can be accessed calling Next on the first child. Returns
+// nil if there is none.
+func (n *Node) Child() *Node {
+	if n.child == 0 {
+		return nil
+	}
+	ptr := unsafe.Pointer(n)
+	size := unsafe.Sizeof(Node{})
+	return (*Node)(danger.Stride(ptr, size, n.child))
+}
+
+// Valid reports whether n references an actual node (i.e. is non-nil).
+func (n *Node) Valid() bool {
+	return n != nil
+}
+
+// Key returns the child nodes making the Key on a supported
+// node. Panics otherwise. They are guaranteed to be all be of the
+// Kind Key. A simple key would return just one element.
+func (n *Node) Key() Iterator {
+	switch n.Kind {
+	case KeyValue:
+		// Skip the first child, which holds the value.
+		value := n.Child()
+		if !value.Valid() {
+			panic(fmt.Errorf("KeyValue should have at least two children"))
+		}
+		return Iterator{node: value.Next()}
+	case Table, ArrayTable:
+		return Iterator{node: n.Child()}
+	default:
+		panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
+	}
+}
+
+// Value returns the value node of a KeyValue node (its first child).
+// It may be nil if the node's children are malformed.
+func (n *Node) Value() *Node {
+	return n.Child()
+}
+
+// Children returns an iterator over a node's children.
+func (n *Node) Children() Iterator {
+	return Iterator{node: n.Child()}
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
new file mode 100644
index 000000000..120f16e5c
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
@@ -0,0 +1,51 @@
+package ast
+
+// Reference is an index into the AST's backing node array.
+type Reference int
+
+// InvalidReference marks a Reference that does not point at a node.
+const InvalidReference Reference = -1
+
+// Valid reports whether r points at a node.
+func (r Reference) Valid() bool {
+	return r != InvalidReference
+}
+
+// Builder incrementally constructs a Root AST. Nodes are appended to a
+// flat array and linked together by relative offsets.
+type Builder struct {
+	tree    Root
+	lastIdx int
+}
+
+// Tree returns the AST built so far.
+func (b *Builder) Tree() *Root {
+	return &b.tree
+}
+
+// NodeAt resolves a Reference to the node it designates.
+func (b *Builder) NodeAt(ref Reference) *Node {
+	return b.tree.at(ref)
+}
+
+// Reset empties the builder for reuse, keeping the allocated storage.
+func (b *Builder) Reset() {
+	b.tree.nodes = b.tree.nodes[:0]
+	b.lastIdx = 0
+}
+
+// Push appends n to the tree without linking it to the previous node.
+func (b *Builder) Push(n Node) Reference {
+	b.lastIdx = len(b.tree.nodes)
+	b.tree.nodes = append(b.tree.nodes, n)
+	return Reference(b.lastIdx)
+}
+
+// PushAndChain appends n to the tree and links the previously pushed
+// node to it as its next sibling.
+func (b *Builder) PushAndChain(n Node) Reference {
+	newIdx := len(b.tree.nodes)
+	b.tree.nodes = append(b.tree.nodes, n)
+	if b.lastIdx >= 0 {
+		b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
+	}
+	b.lastIdx = newIdx
+	return Reference(b.lastIdx)
+}
+
+// AttachChild records child as the first child of parent, stored as a
+// relative offset.
+func (b *Builder) AttachChild(parent Reference, child Reference) {
+	b.tree.nodes[parent].child = int(child) - int(parent)
+}
+
+// Chain links the node at from to the node at to as its next sibling.
+func (b *Builder) Chain(from Reference, to Reference) {
+	b.tree.nodes[from].next = int(to) - int(from)
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go
new file mode 100644
index 000000000..2b50c67fc
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go
@@ -0,0 +1,69 @@
+package ast
+
+import "fmt"
+
+// Kind identifies the type of an AST node.
+type Kind int
+
+const (
+	// meta
+	Invalid Kind = iota
+	Comment
+	Key
+
+	// top level structures
+	Table
+	ArrayTable
+	KeyValue
+
+	// containers values
+	Array
+	InlineTable
+
+	// values
+	String
+	Bool
+	Float
+	Integer
+	LocalDate
+	LocalTime
+	LocalDateTime
+	DateTime
+)
+
+// String returns a human-readable name for k. It panics on values not
+// declared above, which would indicate a programming error.
+func (k Kind) String() string {
+	switch k {
+	case Invalid:
+		return "Invalid"
+	case Comment:
+		return "Comment"
+	case Key:
+		return "Key"
+	case Table:
+		return "Table"
+	case ArrayTable:
+		return "ArrayTable"
+	case KeyValue:
+		return "KeyValue"
+	case Array:
+		return "Array"
+	case InlineTable:
+		return "InlineTable"
+	case String:
+		return "String"
+	case Bool:
+		return "Bool"
+	case Float:
+		return "Float"
+	case Integer:
+		return "Integer"
+	case LocalDate:
+		return "LocalDate"
+	case LocalTime:
+		return "LocalTime"
+	case LocalDateTime:
+		return "LocalDateTime"
+	case DateTime:
+		return "DateTime"
+	}
+	panic(fmt.Errorf("Kind.String() not implemented for '%d'", k))
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
new file mode 100644
index 000000000..e38e1131b
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
@@ -0,0 +1,65 @@
+package danger
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// maxInt is the largest value representable by int, as a uintptr.
+const maxInt = uintptr(int(^uint(0) >> 1))
+
+// SubsliceOffset returns the offset in bytes of subslice within data.
+// It panics if subslice does not lie entirely inside data.
+// NOTE(review): relies on reflect.SliceHeader, deprecated in recent Go;
+// consider unsafe.SliceData when the minimum toolchain allows.
+func SubsliceOffset(data []byte, subslice []byte) int {
+	datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+	hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))
+
+	if hlp.Data < datap.Data {
+		panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
+	}
+	offset := hlp.Data - datap.Data
+
+	if offset > maxInt {
+		panic(fmt.Errorf("slice offset larger than int (%d)", offset))
+	}
+
+	intoffset := int(offset)
+
+	if intoffset > datap.Len {
+		panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
+	}
+
+	if intoffset+hlp.Len > datap.Len {
+		panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
+	}
+
+	return intoffset
+}
+
+// BytesRange returns the slice spanning from the first byte of start to
+// the last byte of end. Both must point into the same backing array,
+// with start not after end; otherwise this function panics.
+func BytesRange(start []byte, end []byte) []byte {
+	if start == nil || end == nil {
+		panic("cannot call BytesRange with nil")
+	}
+	startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
+	endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))
+
+	if startp.Data > endp.Data {
+		panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
+	}
+
+	l := startp.Len
+	endLen := int(endp.Data-startp.Data) + endp.Len
+	if endLen > l {
+		l = endLen
+	}
+
+	if l > startp.Cap {
+		panic(fmt.Errorf("range length is larger than capacity"))
+	}
+
+	return start[:l]
+}
+
+// Stride advances ptr by offset elements of the given size. offset may
+// be negative.
+func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
+	// TODO: replace with unsafe.Add when Go 1.17 is released
+	// https://github.com/golang/go/issues/40481
+	return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
new file mode 100644
index 000000000..9d41c28a2
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
@@ -0,0 +1,23 @@
+package danger
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// TypeID is used as key in encoder and decoder caches to enable using
+// the optimize runtime.mapaccess2_fast64 function instead of the more
+// expensive lookup if we were to use reflect.Type as map key.
+//
+// TypeID holds the pointer to the reflect.Type value, which is unique
+// in the program.
+//
+// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
+type TypeID unsafe.Pointer
+
+// MakeTypeID extracts the data pointer out of a reflect.Type interface
+// value for use as a cache key.
+func MakeTypeID(t reflect.Type) TypeID {
+	// reflect.Type has the fields:
+	// typ unsafe.Pointer
+	// ptr unsafe.Pointer
+	return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
new file mode 100644
index 000000000..7c148f48d
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
@@ -0,0 +1,50 @@
+package tracker
+
+import (
+ "github.com/pelletier/go-toml/v2/internal/ast"
+)
+
+// KeyTracker is a tracker that keeps track of the current Key as the AST is
+// walked.
+type KeyTracker struct {
+	k []string
+}
+
+// UpdateTable sets the state of the tracker with the AST table node.
+func (t *KeyTracker) UpdateTable(node *ast.Node) {
+	t.reset()
+	t.Push(node)
+}
+
+// UpdateArrayTable sets the state of the tracker with the AST array table node.
+func (t *KeyTracker) UpdateArrayTable(node *ast.Node) {
+	t.reset()
+	t.Push(node)
+}
+
+// Push the given key on the stack: one element per part of the node's
+// (possibly dotted) key.
+func (t *KeyTracker) Push(node *ast.Node) {
+	it := node.Key()
+	for it.Next() {
+		t.k = append(t.k, string(it.Node().Data))
+	}
+}
+
+// Pop key from stack, removing one element per part of the node's key
+// (the mirror of Push).
+func (t *KeyTracker) Pop(node *ast.Node) {
+	it := node.Key()
+	for it.Next() {
+		t.k = t.k[:len(t.k)-1]
+	}
+}
+
+// Key returns a copy of the current key, safe to retain by the caller.
+func (t *KeyTracker) Key() []string {
+	k := make([]string, len(t.k))
+	copy(k, t.k)
+	return k
+}
+
+// reset empties the stack while keeping its allocated storage.
+func (t *KeyTracker) reset() {
+	t.k = t.k[:0]
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
new file mode 100644
index 000000000..a7ee05ba6
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
@@ -0,0 +1,356 @@
+package tracker
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ "github.com/pelletier/go-toml/v2/internal/ast"
+)
+
+// keyKind classifies what a seen key was defined as, to detect
+// duplicate and mismatched redefinitions.
+type keyKind uint8
+
+const (
+	invalidKind keyKind = iota
+	valueKind
+	tableKind
+	arrayTableKind
+)
+
+// String returns a human-readable name for k, for error messages.
+func (k keyKind) String() string {
+	switch k {
+	case invalidKind:
+		return "invalid"
+	case valueKind:
+		return "value"
+	case tableKind:
+		return "table"
+	case arrayTableKind:
+		return "array table"
+	}
+	panic("missing keyKind string mapping")
+}
+
+// SeenTracker tracks which keys have been seen with which TOML type to flag
+// duplicates and mismatches according to the spec.
+//
+// Each node in the visited tree is represented by an entry. Each entry has an
+// identifier, which is provided by a counter. Entries are stored in the array
+// entries. As new nodes are discovered (referenced for the first time in the
+// TOML document), entries are created and appended to the array. An entry
+// points to its parent using its id.
+//
+// To find whether a given key (sequence of []byte) has already been visited,
+// the entries are linearly searched, looking for one with the right name and
+// parent id.
+//
+// Given that all keys appear in the document after their parent, it is
+// guaranteed that all descendants of a node are stored after the node, this
+// speeds up the search process.
+//
+// When encountering [[array tables]], the descendants of that node are removed
+// to allow that branch of the tree to be "rediscovered". To maintain the
+// invariant above, the deletion process needs to keep the order of entries.
+// This results in more copies in that case.
+type SeenTracker struct {
+	entries    []entry
+	currentIdx int
+}
+
+// pool caches scratch SeenTracker instances used to validate inline
+// tables (see checkInlineTable).
+var pool sync.Pool
+
+// reset restores the tracker to its initial state, keeping allocated
+// storage when possible.
+func (s *SeenTracker) reset() {
+	// Always contains a root element at index 0.
+	s.currentIdx = 0
+	if len(s.entries) == 0 {
+		s.entries = make([]entry, 1, 2)
+	} else {
+		s.entries = s.entries[:1]
+	}
+	s.entries[0].child = -1
+	s.entries[0].next = -1
+}
+
+// entry is one node of the seen-keys tree.
+type entry struct {
+	// Use -1 to indicate no child or no sibling.
+	child int
+	next  int
+
+	name     []byte
+	kind     keyKind
+	explicit bool
+	kv       bool
+}
+
+// Find the index of the child of parentIdx with key k. Returns -1 if
+// it does not exist.
+func (s *SeenTracker) find(parentIdx int, k []byte) int {
+	for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
+		if bytes.Equal(s.entries[i].name, k) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Remove all descendants of node at position idx.
+// Cleared slots are pushed onto a free list whose head is stored in
+// entry 0's next pointer; create reuses them before growing the array.
+func (s *SeenTracker) clear(idx int) {
+	if idx >= len(s.entries) {
+		return
+	}
+
+	for i := s.entries[idx].child; i >= 0; {
+		next := s.entries[i].next
+		n := s.entries[0].next
+		s.entries[0].next = i
+		s.entries[i].next = n
+		s.entries[i].name = nil
+		s.clear(i)
+		i = next
+	}
+
+	s.entries[idx].child = -1
+}
+
+// create inserts a new entry as the first child of parentIdx, reusing a
+// slot from the free list (rooted at entry 0's next) when one is
+// available, and returns its index.
+func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int {
+	e := entry{
+		child: -1,
+		next:  s.entries[parentIdx].child,
+
+		name:     name,
+		kind:     kind,
+		explicit: explicit,
+		kv:       kv,
+	}
+	var idx int
+	if s.entries[0].next >= 0 {
+		idx = s.entries[0].next
+		s.entries[0].next = s.entries[idx].next
+		s.entries[idx] = e
+	} else {
+		idx = len(s.entries)
+		s.entries = append(s.entries, e)
+	}
+
+	s.entries[parentIdx].child = idx
+
+	return idx
+}
+
+// setExplicitFlag recursively promotes keys created through key-value
+// pairs under parentIdx to explicitly-defined tables, so later [table]
+// headers cannot redefine them.
+func (s *SeenTracker) setExplicitFlag(parentIdx int) {
+	for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
+		if s.entries[i].kv {
+			s.entries[i].explicit = true
+			s.entries[i].kv = false
+		}
+		s.setExplicitFlag(i)
+	}
+}
+
+// CheckExpression takes a top-level node and checks that it does not contain
+// keys that have been seen in previous calls, and validates that types are
+// consistent.
+func (s *SeenTracker) CheckExpression(node *ast.Node) error {
+	if s.entries == nil {
+		s.reset()
+	}
+	switch node.Kind {
+	case ast.KeyValue:
+		return s.checkKeyValue(node)
+	case ast.Table:
+		return s.checkTable(node)
+	case ast.ArrayTable:
+		return s.checkArrayTable(node)
+	default:
+		panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind))
+	}
+}
+
+// checkTable validates a [table] header: intermediate dotted parts are
+// implicitly created as tables; the final part must not already exist
+// as an explicit table or as a value.
+func (s *SeenTracker) checkTable(node *ast.Node) error {
+	if s.currentIdx >= 0 {
+		s.setExplicitFlag(s.currentIdx)
+	}
+
+	it := node.Key()
+
+	parentIdx := 0
+
+	// This code is duplicated in checkArrayTable. This is because factoring
+	// it in a function requires to copy the iterator, or allocate it to the
+	// heap, which is not cheap.
+	for it.Next() {
+		if it.IsLast() {
+			break
+		}
+
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, false)
+		} else {
+			entry := s.entries[idx]
+			if entry.kind == valueKind {
+				return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			}
+		}
+		parentIdx = idx
+	}
+
+	k := it.Node().Data
+	idx := s.find(parentIdx, k)
+
+	if idx >= 0 {
+		kind := s.entries[idx].kind
+		if kind != tableKind {
+			return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind)
+		}
+		if s.entries[idx].explicit {
+			return fmt.Errorf("toml: table %s already exists", string(k))
+		}
+		s.entries[idx].explicit = true
+	} else {
+		idx = s.create(parentIdx, k, tableKind, true, false)
+	}
+
+	s.currentIdx = idx
+
+	return nil
+}
+
+// checkArrayTable validates an [[array table]] header. Intermediate
+// dotted key parts are implicitly created as tables; the final part
+// must either be new or already be an array table, in which case its
+// descendants are cleared so the new element can redefine them.
+func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
+	if s.currentIdx >= 0 {
+		s.setExplicitFlag(s.currentIdx)
+	}
+
+	it := node.Key()
+
+	parentIdx := 0
+
+	// Mirrors the dotted-key walk in checkTable; kept inline for the
+	// same performance reason documented there.
+	for it.Next() {
+		if it.IsLast() {
+			break
+		}
+
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, false)
+		} else {
+			entry := s.entries[idx]
+			if entry.kind == valueKind {
+				return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			}
+		}
+
+		parentIdx = idx
+	}
+
+	k := it.Node().Data
+	idx := s.find(parentIdx, k)
+
+	if idx >= 0 {
+		kind := s.entries[idx].kind
+		if kind != arrayTableKind {
+			// Fixed: the key and its kind were previously passed in the
+			// wrong order for the format verbs.
+			return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", string(k), kind)
+		}
+		// Re-opening an array table starts a fresh element: forget its
+		// previously seen descendants.
+		s.clear(idx)
+	} else {
+		idx = s.create(parentIdx, k, arrayTableKind, true, false)
+	}
+
+	s.currentIdx = idx
+
+	return nil
+}
+
+// checkKeyValue validates a key = value expression under the current
+// table, creating intermediate dotted key parts and flagging
+// redefinitions of already-seen keys and tables.
+func (s *SeenTracker) checkKeyValue(node *ast.Node) error {
+	parentIdx := s.currentIdx
+	it := node.Key()
+
+	for it.Next() {
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, true)
+		} else {
+			entry := s.entries[idx]
+			if it.IsLast() {
+				return fmt.Errorf("toml: key %s is already defined", string(k))
+			} else if entry.kind != tableKind {
+				return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			} else if entry.explicit {
+				return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k))
+			}
+		}
+
+		parentIdx = idx
+	}
+
+	// The last key part holds the value itself.
+	s.entries[parentIdx].kind = valueKind
+
+	value := node.Value()
+
+	switch value.Kind {
+	case ast.InlineTable:
+		return s.checkInlineTable(value)
+	case ast.Array:
+		return s.checkArray(value)
+	}
+
+	return nil
+}
+
+// checkArray recursively validates inline tables and nested arrays
+// contained in an array value.
+func (s *SeenTracker) checkArray(node *ast.Node) error {
+	it := node.Children()
+	for it.Next() {
+		n := it.Node()
+		switch n.Kind {
+		case ast.InlineTable:
+			err := s.checkInlineTable(n)
+			if err != nil {
+				return err
+			}
+		case ast.Array:
+			err := s.checkArray(n)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// checkInlineTable validates the key-value pairs of an inline table
+// using a scratch tracker from the pool. The receiver s is deliberately
+// shadowed: inline tables are self-contained, so their contents are not
+// recorded in the caller's tracker.
+// NOTE(review): on error the scratch tracker is not returned to the
+// pool; harmless (sync.Pool entries are optional) but confirm intended.
+func (s *SeenTracker) checkInlineTable(node *ast.Node) error {
+	if pool.New == nil {
+		pool.New = func() interface{} {
+			return &SeenTracker{}
+		}
+	}
+
+	s = pool.Get().(*SeenTracker)
+	s.reset()
+
+	it := node.Children()
+	for it.Next() {
+		n := it.Node()
+		err := s.checkKeyValue(n)
+		if err != nil {
+			return err
+		}
+	}
+
+	// As inline tables are self-contained, the tracker does not
+	// need to retain the details of what they contain. The
+	// keyValue element that creates the inline table is kept to
+	// mark the presence of the inline table and prevent
+	// redefinition of its keys: check* functions cannot walk into
+	// a value.
+	pool.Put(s)
+	return nil
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
new file mode 100644
index 000000000..bf0317392
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
@@ -0,0 +1 @@
+package tracker
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
new file mode 100644
index 000000000..30a31dcbd
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
@@ -0,0 +1,120 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// LocalDate represents a calendar day in no specific timezone.
+type LocalDate struct {
+ Year int
+ Month int
+ Day int
+}
+
+// AsTime converts d into a specific time instance at midnight in zone.
+func (d LocalDate) AsTime(zone *time.Location) time.Time {
+ return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone)
+}
+
+// String returns RFC 3339 representation of d.
+func (d LocalDate) String() string {
+ return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalDate) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalDate) UnmarshalText(b []byte) error {
+ res, err := parseLocalDate(b)
+ if err != nil {
+ return err
+ }
+ *d = res
+ return nil
+}
+
+// LocalTime represents a time of day of no specific day in no specific
+// timezone.
+type LocalTime struct {
+ Hour int // Hour of the day: [0; 24[
+ Minute int // Minute of the hour: [0; 60[
+ Second int // Second of the minute: [0; 60[
+ Nanosecond int // Nanoseconds within the second: [0, 1000000000[
+ Precision int // Number of digits to display for Nanosecond.
+}
+
+// String returns RFC 3339 representation of d.
+// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond
+// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number
+// of digits for nanoseconds is provided.
+func (d LocalTime) String() string {
+	s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second)
+
+	if d.Precision > 0 {
+		// Keep the leading dot plus exactly Precision fractional digits.
+		// NOTE(review): Precision > 9 would slice past the end of the
+		// 10-char formatted string and panic — assumed to be bounded by
+		// the parser; confirm.
+		s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1]
+	} else if d.Nanosecond > 0 {
+		// Nanoseconds are specified, but precision is not provided. Use the
+		// minimum.
+		s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0")
+	}
+
+	return s
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalTime) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalTime) UnmarshalText(b []byte) error {
+ res, left, err := parseLocalTime(b)
+ if err == nil && len(left) != 0 {
+ err = newDecodeError(left, "extra characters")
+ }
+ if err != nil {
+ return err
+ }
+ *d = res
+ return nil
+}
+
+// LocalDateTime represents a time of a specific day in no specific timezone.
+type LocalDateTime struct {
+ LocalDate
+ LocalTime
+}
+
+// AsTime converts d into a specific time instance in zone.
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
+ return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
+}
+
+// String returns RFC 3339 representation of d.
+func (d LocalDateTime) String() string {
+ return d.LocalDate.String() + "T" + d.LocalTime.String()
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalDateTime) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText parses b using RFC 3339 to fill d.
+func (d *LocalDateTime) UnmarshalText(data []byte) error {
+ res, left, err := parseLocalDateTime(data)
+ if err == nil && len(left) != 0 {
+ err = newDecodeError(left, "extra characters")
+ }
+ if err != nil {
+ return err
+ }
+
+ *d = res
+ return nil
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
new file mode 100644
index 000000000..acb288315
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -0,0 +1,1040 @@
+package toml
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// Marshal serializes a Go value as a TOML document.
+//
+// It is a shortcut for Encoder.Encode() with the default options.
+func Marshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+
+ err := enc.Encode(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// Encoder writes a TOML document to an output stream.
+type Encoder struct {
+ // output
+ w io.Writer
+
+ // global settings
+ tablesInline bool
+ arraysMultiline bool
+ indentSymbol string
+ indentTables bool
+}
+
+// NewEncoder returns a new Encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ indentSymbol: " ",
+ }
+}
+
+// SetTablesInline forces the encoder to emit all tables inline.
+//
+// This behavior can be controlled on an individual struct field basis with the
+// inline tag:
+//
+// MyField `toml:",inline"`
+func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
+ enc.tablesInline = inline
+ return enc
+}
+
+// SetArraysMultiline forces the encoder to emit all arrays with one element per
+// line.
+//
+// This behavior can be controlled on an individual struct field basis with the multiline tag:
+//
+// MyField `multiline:"true"`
+func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder {
+ enc.arraysMultiline = multiline
+ return enc
+}
+
+// SetIndentSymbol defines the string that should be used for indentation. The
+// provided string is repeated for each indentation level. Defaults to two
+// spaces.
+func (enc *Encoder) SetIndentSymbol(s string) *Encoder {
+ enc.indentSymbol = s
+ return enc
+}
+
+// SetIndentTables forces the encoder to indent tables and array tables.
+func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
+	enc.indentTables = indent
+	return enc
+}
+
+// Encode writes a TOML representation of v to the stream.
+//
+// If v cannot be represented to TOML it returns an error.
+//
+// # Encoding rules
+//
+// A top level slice containing only maps or structs is encoded as [[table
+// array]].
+//
+// All slices not matching rule 1 are encoded as [array]. As a result, any map
+// or struct they contain is encoded as an {inline table}.
+//
+// Nil interfaces and nil pointers are not supported.
+//
+// Keys in key-values always have one part.
+//
+// Intermediate tables are always printed.
+//
+// By default, strings are encoded as literal string, unless they contain either
+// a newline character or a single quote. In that case they are emitted as
+// quoted strings.
+//
+// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so
+// results in an error. This rule exists because the TOML specification only
+// requires parsers to support at least the 64 bits integer range. Allowing
+// larger numbers would create non-standard TOML documents, which may not be
+// readable (at best) by other implementations. To encode such numbers, a
+// solution is a custom type that implements encoding.TextMarshaler.
+//
+// When encoding structs, fields are encoded in order of definition, with their
+// exact name.
+//
+// Tables and array tables are separated by empty lines. However, consecutive
+// subtables definitions are not. For example:
+//
+// [top1]
+//
+// [top2]
+// [top2.child1]
+//
+// [[array]]
+//
+// [[array]]
+// [array.child2]
+//
+// # Struct tags
+//
+// The encoding of each public struct field can be customized by the format
+// string in the "toml" key of the struct field's tag. This follows
+// encoding/json's convention. The format string starts with the name of the
+// field, optionally followed by a comma-separated list of options. The name may
+// be empty in order to provide options without overriding the default name.
+//
+// The "multiline" option emits strings as quoted multi-line TOML strings. It
+// has no effect on fields that would not be encoded as strings.
+//
+// The "inline" option turns fields that would be emitted as tables into inline
+// tables instead. It has no effect on other fields.
+//
+// The "omitempty" option prevents empty values or groups from being emitted.
+//
+// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit
+// a TOML comment before the value being annotated. Comments are ignored inside
+// inline tables. For array tables, the comment is only present before the first
+// element of the array.
+func (enc *Encoder) Encode(v interface{}) error {
+ var (
+ b []byte
+ ctx encoderCtx
+ )
+
+ ctx.inline = enc.tablesInline
+
+ if v == nil {
+ return fmt.Errorf("toml: cannot encode a nil interface")
+ }
+
+ b, err := enc.encode(b, ctx, reflect.ValueOf(v))
+ if err != nil {
+ return err
+ }
+
+ _, err = enc.w.Write(b)
+ if err != nil {
+ return fmt.Errorf("toml: cannot write: %w", err)
+ }
+
+ return nil
+}
+
+type valueOptions struct {
+ multiline bool
+ omitempty bool
+ comment string
+}
+
+type encoderCtx struct {
+ // Current top-level key.
+ parentKey []string
+
+ // Key that should be used for a KV.
+ key string
+ // Extra flag to account for the empty string
+ hasKey bool
+
+ // Set to true to indicate that the encoder is inside a KV, so that all
+ // tables need to be inlined.
+ insideKv bool
+
+ // Set to true to skip the first table header in an array table.
+ skipTableHeader bool
+
+ // Should the next table be encoded as inline
+ inline bool
+
+ // Indentation level
+ indent int
+
+ // Options coming from struct tags
+ options valueOptions
+}
+
+// shiftKey moves the pending KV key (if any) onto the parent key path
+// and clears it.
+func (ctx *encoderCtx) shiftKey() {
+	if ctx.hasKey {
+		ctx.parentKey = append(ctx.parentKey, ctx.key)
+		ctx.clearKey()
+	}
+}
+
+// setKey records k as the key of the next KV to encode.
+func (ctx *encoderCtx) setKey(k string) {
+	ctx.key = k
+	ctx.hasKey = true
+}
+
+// clearKey forgets the pending KV key. hasKey distinguishes "no key"
+// from a legitimate empty-string key.
+func (ctx *encoderCtx) clearKey() {
+	ctx.key = ""
+	ctx.hasKey = false
+}
+
+// isRoot reports whether the encoder is at the document root: no parent
+// key path and no pending key.
+func (ctx *encoderCtx) isRoot() bool {
+	return len(ctx.parentKey) == 0 && !ctx.hasKey
+}
+
+func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ i := v.Interface()
+
+ switch x := i.(type) {
+ case time.Time:
+ if x.Nanosecond() > 0 {
+ return x.AppendFormat(b, time.RFC3339Nano), nil
+ }
+ return x.AppendFormat(b, time.RFC3339), nil
+ case LocalTime:
+ return append(b, x.String()...), nil
+ case LocalDate:
+ return append(b, x.String()...), nil
+ case LocalDateTime:
+ return append(b, x.String()...), nil
+ }
+
+ hasTextMarshaler := v.Type().Implements(textMarshalerType)
+ if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) {
+ if !hasTextMarshaler {
+ v = v.Addr()
+ }
+
+ if ctx.isRoot() {
+ return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type())
+ }
+
+ text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return nil, err
+ }
+
+ b = enc.encodeString(b, string(text), ctx.options)
+
+ return b, nil
+ }
+
+ switch v.Kind() {
+ // containers
+ case reflect.Map:
+ return enc.encodeMap(b, ctx, v)
+ case reflect.Struct:
+ return enc.encodeStruct(b, ctx, v)
+ case reflect.Slice:
+ return enc.encodeSlice(b, ctx, v)
+ case reflect.Interface:
+ if v.IsNil() {
+ return nil, fmt.Errorf("toml: encoding a nil interface is not supported")
+ }
+
+ return enc.encode(b, ctx, v.Elem())
+ case reflect.Ptr:
+ if v.IsNil() {
+ return enc.encode(b, ctx, reflect.Zero(v.Type().Elem()))
+ }
+
+ return enc.encode(b, ctx, v.Elem())
+
+ // values
+ case reflect.String:
+ b = enc.encodeString(b, v.String(), ctx.options)
+ case reflect.Float32:
+ f := v.Float()
+
+ if math.IsNaN(f) {
+ b = append(b, "nan"...)
+ } else if f > math.MaxFloat32 {
+ b = append(b, "inf"...)
+ } else if f < -math.MaxFloat32 {
+ b = append(b, "-inf"...)
+ } else if math.Trunc(f) == f {
+ b = strconv.AppendFloat(b, f, 'f', 1, 32)
+ } else {
+ b = strconv.AppendFloat(b, f, 'f', -1, 32)
+ }
+ case reflect.Float64:
+ f := v.Float()
+ if math.IsNaN(f) {
+ b = append(b, "nan"...)
+ } else if f > math.MaxFloat64 {
+ b = append(b, "inf"...)
+ } else if f < -math.MaxFloat64 {
+ b = append(b, "-inf"...)
+ } else if math.Trunc(f) == f {
+ b = strconv.AppendFloat(b, f, 'f', 1, 64)
+ } else {
+ b = strconv.AppendFloat(b, f, 'f', -1, 64)
+ }
+ case reflect.Bool:
+ if v.Bool() {
+ b = append(b, "true"...)
+ } else {
+ b = append(b, "false"...)
+ }
+ case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
+ x := v.Uint()
+ if x > uint64(math.MaxInt64) {
+ return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64))
+ }
+ b = strconv.AppendUint(b, x, 10)
+ case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int:
+ b = strconv.AppendInt(b, v.Int(), 10)
+ default:
+ return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind())
+ }
+
+ return b, nil
+}
+
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Map:
+ return v.IsNil()
+ default:
+ return false
+ }
+}
+
+func shouldOmitEmpty(options valueOptions, v reflect.Value) bool {
+ return options.omitempty && isEmptyValue(v)
+}
+
+func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) {
+ var err error
+
+ if !ctx.inline {
+ b = enc.encodeComment(ctx.indent, options.comment, b)
+ }
+
+ b = enc.indent(ctx.indent, b)
+ b = enc.encodeKey(b, ctx.key)
+ b = append(b, " = "...)
+
+ // create a copy of the context because the value of a KV shouldn't
+ // modify the global context.
+ subctx := ctx
+ subctx.insideKv = true
+ subctx.shiftKey()
+ subctx.options = options
+
+ b, err = enc.encode(b, subctx, v)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Struct:
+ return isEmptyStruct(v)
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func isEmptyStruct(v reflect.Value) bool {
+ // TODO: merge with walkStruct and cache.
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ fieldType := typ.Field(i)
+
+ // only consider exported fields
+ if fieldType.PkgPath != "" {
+ continue
+ }
+
+ tag := fieldType.Tag.Get("toml")
+
+ // special field name to skip field
+ if tag == "-" {
+ continue
+ }
+
+ f := v.Field(i)
+
+ if !isEmptyValue(f) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// literalQuote is the single-quote delimiter of TOML literal strings.
+const literalQuote = '\''
+
+// encodeString emits v as a literal ('-quoted) string when possible,
+// falling back to a (possibly multiline) basic quoted string when v
+// contains characters a literal string cannot represent.
+func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte {
+	if needsQuoting(v) {
+		return enc.encodeQuotedString(options.multiline, b, v)
+	}
+
+	return enc.encodeLiteralString(b, v)
+}
+
+// needsQuoting reports whether v cannot be emitted as a literal string:
+// it contains a single quote, a CR/LF, or a byte rejected by the
+// invalidAscii helper (defined elsewhere in this package).
+func needsQuoting(v string) bool {
+	// TODO: vectorize
+	for _, b := range []byte(v) {
+		if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) {
+			return true
+		}
+	}
+	return false
+}
+
+// caller should have checked that the string does not contain new lines or ' .
+func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte {
+ b = append(b, literalQuote)
+ b = append(b, v...)
+ b = append(b, literalQuote)
+
+ return b
+}
+
+func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte {
+ stringQuote := `"`
+
+ if multiline {
+ stringQuote = `"""`
+ }
+
+ b = append(b, stringQuote...)
+ if multiline {
+ b = append(b, '\n')
+ }
+
+ const (
+ hextable = "0123456789ABCDEF"
+ // U+0000 to U+0008, U+000A to U+001F, U+007F
+ nul = 0x0
+ bs = 0x8
+ lf = 0xa
+ us = 0x1f
+ del = 0x7f
+ )
+
+ for _, r := range []byte(v) {
+ switch r {
+ case '\\':
+ b = append(b, `\\`...)
+ case '"':
+ b = append(b, `\"`...)
+ case '\b':
+ b = append(b, `\b`...)
+ case '\f':
+ b = append(b, `\f`...)
+ case '\n':
+ if multiline {
+ b = append(b, r)
+ } else {
+ b = append(b, `\n`...)
+ }
+ case '\r':
+ b = append(b, `\r`...)
+ case '\t':
+ b = append(b, `\t`...)
+ default:
+ switch {
+ case r >= nul && r <= bs, r >= lf && r <= us, r == del:
+ b = append(b, `\u00`...)
+ b = append(b, hextable[r>>4])
+ b = append(b, hextable[r&0x0f])
+ default:
+ b = append(b, r)
+ }
+ }
+ }
+
+ b = append(b, stringQuote...)
+
+ return b
+}
+
+// encodeUnquotedKey appends v verbatim as a bare TOML key. The caller
+// must have checked that v only contains A-Z / a-z / 0-9 / - / _ .
+func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte {
+	return append(b, v...)
+}
+
+func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) {
+ if len(ctx.parentKey) == 0 {
+ return b, nil
+ }
+
+ b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+
+ b = enc.indent(ctx.indent, b)
+
+ b = append(b, '[')
+
+ b = enc.encodeKey(b, ctx.parentKey[0])
+
+ for _, k := range ctx.parentKey[1:] {
+ b = append(b, '.')
+ b = enc.encodeKey(b, k)
+ }
+
+ b = append(b, "]\n"...)
+
+ return b, nil
+}
+
+//nolint:cyclop
+func (enc *Encoder) encodeKey(b []byte, k string) []byte {
+ needsQuotation := false
+ cannotUseLiteral := false
+
+ if len(k) == 0 {
+ return append(b, "''"...)
+ }
+
+ for _, c := range k {
+ if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' {
+ continue
+ }
+
+ if c == literalQuote {
+ cannotUseLiteral = true
+ }
+
+ needsQuotation = true
+ }
+
+ if needsQuotation && needsQuoting(k) {
+ cannotUseLiteral = true
+ }
+
+ switch {
+ case cannotUseLiteral:
+ return enc.encodeQuotedString(false, b, k)
+ case needsQuotation:
+ return enc.encodeLiteralString(b, k)
+ default:
+ return enc.encodeUnquotedKey(b, k)
+ }
+}
+
+func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ if v.Type().Key().Kind() != reflect.String {
+ return nil, fmt.Errorf("toml: type %s is not supported as a map key", v.Type().Key().Kind())
+ }
+
+ var (
+ t table
+ emptyValueOptions valueOptions
+ )
+
+ iter := v.MapRange()
+ for iter.Next() {
+ k := iter.Key().String()
+ v := iter.Value()
+
+ if isNil(v) {
+ continue
+ }
+
+ if willConvertToTableOrArrayTable(ctx, v) {
+ t.pushTable(k, v, emptyValueOptions)
+ } else {
+ t.pushKV(k, v, emptyValueOptions)
+ }
+ }
+
+ sortEntriesByKey(t.kvs)
+ sortEntriesByKey(t.tables)
+
+ return enc.encodeTable(b, ctx, t)
+}
+
+func sortEntriesByKey(e []entry) {
+ sort.Slice(e, func(i, j int) bool {
+ return e[i].Key < e[j].Key
+ })
+}
+
+// entry is a single key together with the reflected value to encode and
+// the options derived from struct tags.
+type entry struct {
+	Key     string
+	Value   reflect.Value
+	Options valueOptions
+}
+
+// table accumulates the contents of a TOML table before encoding:
+// plain key-values first, then sub-tables.
+type table struct {
+	kvs    []entry
+	tables []entry
+}
+
+// pushKV appends a key-value entry, keeping only the first occurrence
+// of a given key (later duplicates are silently dropped).
+func (t *table) pushKV(k string, v reflect.Value, options valueOptions) {
+	for _, e := range t.kvs {
+		if e.Key == k {
+			return
+		}
+	}
+
+	t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options})
+}
+
+// pushTable appends a sub-table entry, keeping only the first
+// occurrence of a given key (later duplicates are silently dropped).
+func (t *table) pushTable(k string, v reflect.Value, options valueOptions) {
+	for _, e := range t.tables {
+		if e.Key == k {
+			return
+		}
+	}
+	t.tables = append(t.tables, entry{Key: k, Value: v, Options: options})
+}
+
+func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
+ // TODO: cache this
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ fieldType := typ.Field(i)
+
+ // only consider exported fields
+ if fieldType.PkgPath != "" {
+ continue
+ }
+
+ tag := fieldType.Tag.Get("toml")
+
+ // special field name to skip field
+ if tag == "-" {
+ continue
+ }
+
+ k, opts := parseTag(tag)
+ if !isValidName(k) {
+ k = ""
+ }
+
+ f := v.Field(i)
+
+ if k == "" {
+ if fieldType.Anonymous {
+ if fieldType.Type.Kind() == reflect.Struct {
+ walkStruct(ctx, t, f)
+ }
+ continue
+ } else {
+ k = fieldType.Name
+ }
+ }
+
+ if isNil(f) {
+ continue
+ }
+
+ options := valueOptions{
+ multiline: opts.multiline,
+ omitempty: opts.omitempty,
+ comment: fieldType.Tag.Get("comment"),
+ }
+
+ if opts.inline || !willConvertToTableOrArrayTable(ctx, f) {
+ t.pushKV(k, f, options)
+ } else {
+ t.pushTable(k, f, options)
+ }
+ }
+}
+
+func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ var t table
+
+ walkStruct(ctx, &t, v)
+
+ return enc.encodeTable(b, ctx, t)
+}
+
+// encodeComment emits comment as one or more "# "-prefixed lines at the
+// given indentation level. A multi-line comment produces one output
+// line per input line; an empty comment produces no output.
+func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte {
+	for len(comment) > 0 {
+		var line string
+		idx := strings.IndexByte(comment, '\n')
+		if idx >= 0 {
+			line = comment[:idx]
+			comment = comment[idx+1:]
+		} else {
+			line = comment
+			comment = ""
+		}
+		b = enc.indent(indent, b)
+		b = append(b, "# "...)
+		b = append(b, line...)
+		b = append(b, '\n')
+	}
+	return b
+}
+
// isValidName reports whether s is usable as a struct tag key name.
// Letters, digits, and the punctuation set below are allowed; backslash
// and quote characters are reserved and rejected, as is the empty
// string.
func isValidName(s string) bool {
	if s == "" {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:;<=>?@[]^_{|}~ "
	for _, r := range s {
		if strings.ContainsRune(allowedPunct, r) {
			continue
		}
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
			return false
		}
	}
	return true
}
+
// tagOptions holds the comma-separated options parsed from a "toml"
// struct tag.
type tagOptions struct {
	multiline bool
	inline    bool
	omitempty bool
}

// parseTag splits a "toml" struct tag into its name part and its
// options. The name is everything before the first comma; each
// following comma-separated token toggles the matching option, and
// unknown tokens are silently ignored.
func parseTag(tag string) (string, tagOptions) {
	var opts tagOptions

	idx := strings.IndexByte(tag, ',')
	if idx < 0 {
		return tag, opts
	}

	name := tag[:idx]
	for _, o := range strings.Split(tag[idx+1:], ",") {
		switch o {
		case "multiline":
			opts.multiline = true
		case "inline":
			opts.inline = true
		case "omitempty":
			opts.omitempty = true
		}
	}

	return name, opts
}
+
+func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) {
+ var err error
+
+ ctx.shiftKey()
+
+ if ctx.insideKv || (ctx.inline && !ctx.isRoot()) {
+ return enc.encodeTableInline(b, ctx, t)
+ }
+
+ if !ctx.skipTableHeader {
+ b, err = enc.encodeTableHeader(ctx, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if enc.indentTables && len(ctx.parentKey) > 0 {
+ ctx.indent++
+ }
+ }
+ ctx.skipTableHeader = false
+
+ hasNonEmptyKV := false
+ for _, kv := range t.kvs {
+ if shouldOmitEmpty(kv.Options, kv.Value) {
+ continue
+ }
+ hasNonEmptyKV = true
+
+ ctx.setKey(kv.Key)
+
+ b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, '\n')
+ }
+
+ first := true
+ for _, table := range t.tables {
+ if shouldOmitEmpty(table.Options, table.Value) {
+ continue
+ }
+ if first {
+ first = false
+ if hasNonEmptyKV {
+ b = append(b, '\n')
+ }
+ } else {
+ b = append(b, "\n"...)
+ }
+
+ ctx.setKey(table.Key)
+
+ ctx.options = table.Options
+
+ b, err = enc.encode(b, ctx, table.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) {
+ var err error
+
+ b = append(b, '{')
+
+ first := true
+ for _, kv := range t.kvs {
+ if shouldOmitEmpty(kv.Options, kv.Value) {
+ continue
+ }
+
+ if first {
+ first = false
+ } else {
+ b = append(b, `, `...)
+ }
+
+ ctx.setKey(kv.Key)
+
+ b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(t.tables) > 0 {
+ panic("inline table cannot contain nested tables, only key-values")
+ }
+
+ b = append(b, "}"...)
+
+ return b, nil
+}
+
+func willConvertToTable(ctx encoderCtx, v reflect.Value) bool {
+ if !v.IsValid() {
+ return false
+ }
+ if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) {
+ return false
+ }
+
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Map, reflect.Struct:
+ return !ctx.inline
+ case reflect.Interface:
+ return willConvertToTable(ctx, v.Elem())
+ case reflect.Ptr:
+ if v.IsNil() {
+ return false
+ }
+
+ return willConvertToTable(ctx, v.Elem())
+ default:
+ return false
+ }
+}
+
+func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool {
+ if ctx.insideKv {
+ return false
+ }
+ t := v.Type()
+
+ if t.Kind() == reflect.Interface {
+ return willConvertToTableOrArrayTable(ctx, v.Elem())
+ }
+
+ if t.Kind() == reflect.Slice {
+ if v.Len() == 0 {
+ // An empty slice should be a kv = [].
+ return false
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ t := willConvertToTable(ctx, v.Index(i))
+
+ if !t {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ return willConvertToTable(ctx, v)
+}
+
+func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ if v.Len() == 0 {
+ b = append(b, "[]"...)
+
+ return b, nil
+ }
+
+ if willConvertToTableOrArrayTable(ctx, v) {
+ return enc.encodeSliceAsArrayTable(b, ctx, v)
+ }
+
+ return enc.encodeSliceAsArray(b, ctx, v)
+}
+
+// caller should have checked that v is a slice that only contains values that
+// encode into tables.
+func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ ctx.shiftKey()
+
+ scratch := make([]byte, 0, 64)
+ scratch = append(scratch, "[["...)
+
+ for i, k := range ctx.parentKey {
+ if i > 0 {
+ scratch = append(scratch, '.')
+ }
+
+ scratch = enc.encodeKey(scratch, k)
+ }
+
+ scratch = append(scratch, "]]\n"...)
+ ctx.skipTableHeader = true
+
+ b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+
+ for i := 0; i < v.Len(); i++ {
+ if i != 0 {
+ b = append(b, "\n"...)
+ }
+
+ b = append(b, scratch...)
+
+ var err error
+ b, err = enc.encode(b, ctx, v.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) {
+ multiline := ctx.options.multiline || enc.arraysMultiline
+ separator := ", "
+
+ b = append(b, '[')
+
+ subCtx := ctx
+ subCtx.options = valueOptions{}
+
+ if multiline {
+ separator = ",\n"
+
+ b = append(b, '\n')
+
+ subCtx.indent++
+ }
+
+ var err error
+ first := true
+
+ for i := 0; i < v.Len(); i++ {
+ if first {
+ first = false
+ } else {
+ b = append(b, separator...)
+ }
+
+ if multiline {
+ b = enc.indent(subCtx.indent, b)
+ }
+
+ b, err = enc.encode(b, subCtx, v.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if multiline {
+ b = append(b, '\n')
+ b = enc.indent(ctx.indent, b)
+ }
+
+ b = append(b, ']')
+
+ return b, nil
+}
+
+func (enc *Encoder) indent(level int, b []byte) []byte {
+ for i := 0; i < level; i++ {
+ b = append(b, enc.indentSymbol...)
+ }
+
+ return b
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go
new file mode 100644
index 000000000..9859a795b
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go
@@ -0,0 +1,1086 @@
+package toml
+
+import (
+ "bytes"
+ "unicode"
+
+ "github.com/pelletier/go-toml/v2/internal/ast"
+ "github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+type parser struct {
+ builder ast.Builder
+ ref ast.Reference
+ data []byte
+ left []byte
+ err error
+ first bool
+}
+
+// Range returns the location of subslice b within the parser's input
+// buffer as an offset/length pair.
+func (p *parser) Range(b []byte) ast.Range {
+	return ast.Range{
+		Offset: uint32(danger.SubsliceOffset(p.data, b)),
+		Length: uint32(len(b)),
+	}
+}
+
+// Raw returns the input bytes covered by raw.
+func (p *parser) Raw(raw ast.Range) []byte {
+	return p.data[raw.Offset : raw.Offset+raw.Length]
+}
+
+// Reset prepares the parser to process a new document b, clearing any
+// state left over from a previous document.
+func (p *parser) Reset(b []byte) {
+	p.builder.Reset()
+	p.ref = ast.InvalidReference
+	p.data = b
+	p.left = b
+	p.err = nil
+	p.first = true
+}
+
+//nolint:cyclop
+func (p *parser) NextExpression() bool {
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ p.builder.Reset()
+ p.ref = ast.InvalidReference
+
+ for {
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ if !p.first {
+ p.left, p.err = p.parseNewline(p.left)
+ }
+
+ if len(p.left) == 0 || p.err != nil {
+ return false
+ }
+
+ p.ref, p.left, p.err = p.parseExpression(p.left)
+
+ if p.err != nil {
+ return false
+ }
+
+ p.first = false
+
+ if p.ref.Valid() {
+ return true
+ }
+ }
+}
+
+func (p *parser) Expression() *ast.Node {
+ return p.builder.NodeAt(p.ref)
+}
+
+func (p *parser) Error() error {
+ return p.err
+}
+
+// parseNewline consumes a single LF or CRLF newline at the start of b
+// and returns the remaining bytes. Callers (see NextExpression) ensure
+// b is non-empty before calling.
+func (p *parser) parseNewline(b []byte) ([]byte, error) {
+	if b[0] == '\n' {
+		return b[1:], nil
+	}
+
+	if b[0] == '\r' {
+		_, rest, err := scanWindowsNewline(b)
+		return rest, err
+	}
+
+	return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0])
+}
+
+func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) {
+ // expression = ws [ comment ]
+ // expression =/ ws keyval ws [ comment ]
+ // expression =/ ws table ws [ comment ]
+ ref := ast.InvalidReference
+
+ b = p.parseWhitespace(b)
+
+ if len(b) == 0 {
+ return ref, b, nil
+ }
+
+ if b[0] == '#' {
+ _, rest, err := scanComment(b)
+ return ref, rest, err
+ }
+
+ if b[0] == '\n' || b[0] == '\r' {
+ return ref, b, nil
+ }
+
+ var err error
+ if b[0] == '[' {
+ ref, b, err = p.parseTable(b)
+ } else {
+ ref, b, err = p.parseKeyval(b)
+ }
+
+ if err != nil {
+ return ref, nil, err
+ }
+
+ b = p.parseWhitespace(b)
+
+ if len(b) > 0 && b[0] == '#' {
+ _, rest, err := scanComment(b)
+ return ref, rest, err
+ }
+
+ return ref, b, nil
+}
+
+func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) {
+ // table = std-table / array-table
+ if len(b) > 1 && b[1] == '[' {
+ return p.parseArrayTable(b)
+ }
+
+ return p.parseStdTable(b)
+}
+
+func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) {
+ // array-table = array-table-open key array-table-close
+ // array-table-open = %x5B.5B ws ; [[ Double left square bracket
+ // array-table-close = ws %x5D.5D ; ]] Double right square bracket
+ ref := p.builder.Push(ast.Node{
+ Kind: ast.ArrayTable,
+ })
+
+ b = b[2:]
+ b = p.parseWhitespace(b)
+
+ k, b, err := p.parseKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.AttachChild(ref, k)
+ b = p.parseWhitespace(b)
+
+ b, err = expect(']', b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ b, err = expect(']', b)
+
+ return ref, b, err
+}
+
+func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) {
+ // std-table = std-table-open key std-table-close
+ // std-table-open = %x5B ws ; [ Left square bracket
+ // std-table-close = ws %x5D ; ] Right square bracket
+ ref := p.builder.Push(ast.Node{
+ Kind: ast.Table,
+ })
+
+ b = b[1:]
+ b = p.parseWhitespace(b)
+
+ key, b, err := p.parseKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.AttachChild(ref, key)
+
+ b = p.parseWhitespace(b)
+
+ b, err = expect(']', b)
+
+ return ref, b, err
+}
+
+func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) {
+ // keyval = key keyval-sep val
+ ref := p.builder.Push(ast.Node{
+ Kind: ast.KeyValue,
+ })
+
+ key, b, err := p.parseKey(b)
+ if err != nil {
+ return ast.InvalidReference, nil, err
+ }
+
+ // keyval-sep = ws %x3D ws ; =
+
+ b = p.parseWhitespace(b)
+
+ if len(b) == 0 {
+ return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there")
+ }
+
+ b, err = expect('=', b)
+ if err != nil {
+ return ast.InvalidReference, nil, err
+ }
+
+ b = p.parseWhitespace(b)
+
+ valRef, b, err := p.parseVal(b)
+ if err != nil {
+ return ref, b, err
+ }
+
+ p.builder.Chain(valRef, key)
+ p.builder.AttachChild(ref, valRef)
+
+ return ref, b, err
+}
+
// parseVal parses a single TOML value (string, boolean, array,
// inline-table, date-time, float, or integer), dispatching on the first
// byte. It pushes the node and returns its reference, the remaining
// input, and any decode error.
//nolint:cyclop,funlen
func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) {
	// val = string / boolean / array / inline-table / date-time / float / integer
	ref := ast.InvalidReference

	if len(b) == 0 {
		return ref, nil, newDecodeError(b, "expected value, not eof")
	}

	var err error
	c := b[0]

	switch c {
	case '"':
		// Basic string: """...""" (multiline) or "..." (single line).
		var raw []byte
		var v []byte
		if scanFollowsMultilineBasicStringDelimiter(b) {
			raw, v, b, err = p.parseMultilineBasicString(b)
		} else {
			raw, v, b, err = p.parseBasicString(b)
		}

		if err == nil {
			ref = p.builder.Push(ast.Node{
				Kind: ast.String,
				Raw:  p.Range(raw),
				Data: v,
			})
		}

		return ref, b, err
	case '\'':
		// Literal string: '''...''' (multiline) or '...' (single line).
		var raw []byte
		var v []byte
		if scanFollowsMultilineLiteralStringDelimiter(b) {
			raw, v, b, err = p.parseMultilineLiteralString(b)
		} else {
			raw, v, b, err = p.parseLiteralString(b)
		}

		if err == nil {
			ref = p.builder.Push(ast.Node{
				Kind: ast.String,
				Raw:  p.Range(raw),
				Data: v,
			})
		}

		return ref, b, err
	case 't':
		// Boolean true.
		if !scanFollowsTrue(b) {
			return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'")
		}

		ref = p.builder.Push(ast.Node{
			Kind: ast.Bool,
			Data: b[:4],
		})

		return ref, b[4:], nil
	case 'f':
		// Boolean false.
		if !scanFollowsFalse(b) {
			return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'")
		}

		ref = p.builder.Push(ast.Node{
			Kind: ast.Bool,
			Data: b[:5],
		})

		return ref, b[5:], nil
	case '[':
		return p.parseValArray(b)
	case '{':
		return p.parseInlineTable(b)
	default:
		// Anything else must be a number or a date-time.
		return p.parseIntOrFloatOrDateTime(b)
	}
}
+
// atmost returns b truncated to at most n bytes. It is used to bound the
// highlighted span in error messages near the end of the input.
func atmost(b []byte, n int) []byte {
	if len(b) > n {
		return b[:n]
	}

	return b
}
+
+func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
+ v, rest, err := scanLiteralString(b)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return v, v[1 : len(v)-1], rest, nil
+}
+
// parseInlineTable parses an inline table ({ key = val, ... }) starting
// at the opening brace. Key-value pairs are attached under the
// InlineTable node, chained together in document order.
func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) {
	// inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
	// inline-table-open = %x7B ws ; {
	// inline-table-close = ws %x7D ; }
	// inline-table-sep = ws %x2C ws ; , Comma
	// inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
	parent := p.builder.Push(ast.Node{
		Kind: ast.InlineTable,
	})

	first := true

	var child ast.Reference

	// Skip the opening '{' (guaranteed by the caller's dispatch on b[0]).
	b = b[1:]

	var err error

	for len(b) > 0 {
		previousB := b
		b = p.parseWhitespace(b)

		if len(b) == 0 {
			return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete")
		}

		if b[0] == '}' {
			break
		}

		// Every key-value after the first must be preceded by a comma.
		if !first {
			b, err = expect(',', b)
			if err != nil {
				return parent, nil, err
			}
			b = p.parseWhitespace(b)
		}

		var kv ast.Reference

		kv, b, err = p.parseKeyval(b)
		if err != nil {
			return parent, nil, err
		}

		// The first keyval becomes the parent's child; later ones are
		// chained to their predecessor.
		if first {
			p.builder.AttachChild(parent, kv)
		} else {
			p.builder.Chain(child, kv)
		}
		child = kv

		first = false
	}

	rest, err := expect('}', b)

	return parent, rest, err
}
+
// parseValArray parses an array value ([ v1, v2, ... ]) starting at the
// opening bracket. Element nodes are attached under the Array node,
// chained in document order. Trailing commas are accepted, per TOML.
//nolint:funlen,cyclop
func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
	// array = array-open [ array-values ] ws-comment-newline array-close
	// array-open = %x5B ; [
	// array-close = %x5D ; ]
	// array-values = ws-comment-newline val ws-comment-newline array-sep array-values
	// array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]
	// array-sep = %x2C ; , Comma
	// ws-comment-newline = *( wschar / [ comment ] newline )
	arrayStart := b // kept so errors can point at the opening bracket
	b = b[1:]

	parent := p.builder.Push(ast.Node{
		Kind: ast.Array,
	})

	first := true

	var lastChild ast.Reference

	var err error
	for len(b) > 0 {
		b, err = p.parseOptionalWhitespaceCommentNewline(b)
		if err != nil {
			return parent, nil, err
		}

		if len(b) == 0 {
			return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete")
		}

		if b[0] == ']' {
			break
		}

		// A comma is required between elements and forbidden before the
		// first one.
		if b[0] == ',' {
			if first {
				return parent, nil, newDecodeError(b[0:1], "array cannot start with comma")
			}
			b = b[1:]

			b, err = p.parseOptionalWhitespaceCommentNewline(b)
			if err != nil {
				return parent, nil, err
			}
		} else if !first {
			return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas")
		}

		// TOML allows trailing commas in arrays.
		if len(b) > 0 && b[0] == ']' {
			break
		}

		var valueRef ast.Reference
		valueRef, b, err = p.parseVal(b)
		if err != nil {
			return parent, nil, err
		}

		// The first element becomes the Array node's child; later
		// elements are chained to their predecessor.
		if first {
			p.builder.AttachChild(parent, valueRef)
		} else {
			p.builder.Chain(lastChild, valueRef)
		}
		lastChild = valueRef

		b, err = p.parseOptionalWhitespaceCommentNewline(b)
		if err != nil {
			return parent, nil, err
		}
		first = false
	}

	rest, err := expect(']', b)

	return parent, rest, err
}
+
// parseOptionalWhitespaceCommentNewline consumes any run of whitespace,
// comments, and newlines, in any order. It stops at the first byte that
// is none of those (or at end of input) and returns the remaining input.
func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) {
	for len(b) > 0 {
		var err error
		b = p.parseWhitespace(b)

		// A comment runs to the end of the line; the newline itself is
		// handled below.
		if len(b) > 0 && b[0] == '#' {
			_, b, err = scanComment(b)
			if err != nil {
				return nil, err
			}
		}

		if len(b) == 0 {
			break
		}

		if b[0] == '\n' || b[0] == '\r' {
			b, err = p.parseNewline(b)
			if err != nil {
				return nil, err
			}
		} else {
			// Neither whitespace, comment, nor newline: stop here.
			break
		}
	}

	return b, nil
}
+
// parseMultilineLiteralString parses a '''...''' string. It returns the
// raw token (including delimiters), the value, the remaining input, and
// any error. A newline immediately following the opening delimiter is
// trimmed, as required by TOML.
func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) {
	token, rest, err := scanMultilineLiteralString(b)
	if err != nil {
		return nil, nil, nil, err
	}

	i := 3

	// skip the immediate new line
	if token[i] == '\n' {
		i++
	} else if token[i] == '\r' && token[i+1] == '\n' {
		i += 2
	}

	return token, token[i : len(token)-3], rest, err
}
+
// parseMultilineBasicString parses a """...""" string. It returns the
// raw token (including delimiters), the decoded value, the remaining
// input, and any error. A newline immediately after the opening
// delimiter is trimmed, escape sequences are decoded, and a line-ending
// backslash swallows the following whitespace and newlines, per TOML.
//nolint:funlen,gocognit,cyclop
func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) {
	// ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
	// ml-basic-string-delim
	// ml-basic-string-delim = 3quotation-mark
	// ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
	//
	// mlb-content = mlb-char / newline / mlb-escaped-nl
	// mlb-char = mlb-unescaped / escaped
	// mlb-quotes = 1*2quotation-mark
	// mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
	// mlb-escaped-nl = escape ws newline *( wschar / newline )
	token, escaped, rest, err := scanMultilineBasicString(b)
	if err != nil {
		return nil, nil, nil, err
	}

	i := 3

	// skip the immediate new line
	if token[i] == '\n' {
		i++
	} else if token[i] == '\r' && token[i+1] == '\n' {
		i += 2
	}

	// fast path: no escape sequence in the token, so the body is the
	// value verbatim once validated as UTF-8.
	startIdx := i
	endIdx := len(token) - len(`"""`)

	if !escaped {
		str := token[startIdx:endIdx]
		verr := utf8TomlValidAlreadyEscaped(str)
		if verr.Zero() {
			return token, str, rest, nil
		}
		return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
	}

	var builder bytes.Buffer

	// The scanner ensures that the token starts and ends with quotes and that
	// escapes are balanced.
	for i < len(token)-3 {
		c := token[i]

		//nolint:nestif
		if c == '\\' {
			// When the last non-whitespace character on a line is an unescaped \,
			// it will be trimmed along with all whitespace (including newlines) up
			// to the next non-whitespace character or closing delimiter.

			isLastNonWhitespaceOnLine := false
			j := 1
		findEOLLoop:
			for ; j < len(token)-3-i; j++ {
				switch token[i+j] {
				case ' ', '\t':
					continue
				case '\r':
					if token[i+j+1] == '\n' {
						continue
					}
				case '\n':
					isLastNonWhitespaceOnLine = true
				}
				break findEOLLoop
			}
			if isLastNonWhitespaceOnLine {
				// Line-ending backslash: advance past every following
				// whitespace byte and newline, then resume decoding.
				i += j
				for ; i < len(token)-3; i++ {
					c := token[i]
					if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') {
						i--
						break
					}
				}
				i++
				continue
			}

			// handle escaping
			i++
			c = token[i]

			switch c {
			case '"', '\\':
				builder.WriteByte(c)
			case 'b':
				builder.WriteByte('\b')
			case 'f':
				builder.WriteByte('\f')
			case 'n':
				builder.WriteByte('\n')
			case 'r':
				builder.WriteByte('\r')
			case 't':
				builder.WriteByte('\t')
			case 'e':
				// ESC (0x1B) — not in the TOML 1.0 escape set (see
				// toml.abnf in this package); accepted here as an extension.
				builder.WriteByte(0x1B)
			case 'u':
				// \uXXXX: exactly 4 hex digits.
				x, err := hexToRune(atmost(token[i+1:], 4), 4)
				if err != nil {
					return nil, nil, nil, err
				}
				builder.WriteRune(x)
				i += 4
			case 'U':
				// \UXXXXXXXX: exactly 8 hex digits.
				x, err := hexToRune(atmost(token[i+1:], 8), 8)
				if err != nil {
					return nil, nil, nil, err
				}

				builder.WriteRune(x)
				i += 8
			default:
				return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c)
			}
			i++
		} else {
			// Plain content: copy one rune, validating UTF-8 as we go.
			size := utf8ValidNext(token[i:])
			if size == 0 {
				return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c)
			}
			builder.Write(token[i : i+size])
			i += size
		}
	}

	return token, builder.Bytes(), rest, nil
}
+
+func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
+ // key = simple-key / dotted-key
+ // simple-key = quoted-key / unquoted-key
+ //
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ // quoted-key = basic-string / literal-string
+ // dotted-key = simple-key 1*( dot-sep simple-key )
+ //
+ // dot-sep = ws %x2E ws ; . Period
+ raw, key, b, err := p.parseSimpleKey(b)
+ if err != nil {
+ return ast.InvalidReference, nil, err
+ }
+
+ ref := p.builder.Push(ast.Node{
+ Kind: ast.Key,
+ Raw: p.Range(raw),
+ Data: key,
+ })
+
+ for {
+ b = p.parseWhitespace(b)
+ if len(b) > 0 && b[0] == '.' {
+ b = p.parseWhitespace(b[1:])
+
+ raw, key, b, err = p.parseSimpleKey(b)
+ if err != nil {
+ return ref, nil, err
+ }
+
+ p.builder.PushAndChain(ast.Node{
+ Kind: ast.Key,
+ Raw: p.Range(raw),
+ Data: key,
+ })
+ } else {
+ break
+ }
+ }
+
+ return ref, b, nil
+}
+
+func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
+ if len(b) == 0 {
+ return nil, nil, nil, newDecodeError(b, "expected key but found none")
+ }
+
+ // simple-key = quoted-key / unquoted-key
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ // quoted-key = basic-string / literal-string
+ switch {
+ case b[0] == '\'':
+ return p.parseLiteralString(b)
+ case b[0] == '"':
+ return p.parseBasicString(b)
+ case isUnquotedKeyChar(b[0]):
+ key, rest = scanUnquotedKey(b)
+ return key, key, rest, nil
+ default:
+ return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0])
+ }
+}
+
// parseBasicString parses a single-line basic string ("..."). It returns
// the raw token including quotes, the decoded value, the remaining
// input, and any error. When no escape sequence is present the value
// aliases the input buffer instead of being copied.
//nolint:funlen,cyclop
func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
	// basic-string = quotation-mark *basic-char quotation-mark
	// quotation-mark = %x22 ; "
	// basic-char = basic-unescaped / escaped
	// basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
	// escaped = escape escape-seq-char
	// escape-seq-char = %x22 ; " quotation mark U+0022
	// escape-seq-char =/ %x5C ; \ reverse solidus U+005C
	// escape-seq-char =/ %x62 ; b backspace U+0008
	// escape-seq-char =/ %x66 ; f form feed U+000C
	// escape-seq-char =/ %x6E ; n line feed U+000A
	// escape-seq-char =/ %x72 ; r carriage return U+000D
	// escape-seq-char =/ %x74 ; t tab U+0009
	// escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
	// escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
	token, escaped, rest, err := scanBasicString(b)
	if err != nil {
		return nil, nil, nil, err
	}

	startIdx := len(`"`)
	endIdx := len(token) - len(`"`)

	// Fast path. If there is no escape sequence, the string should just be
	// an UTF-8 encoded string, which is the same as Go. In that case,
	// validate the string and return a direct reference to the buffer.
	if !escaped {
		str := token[startIdx:endIdx]
		verr := utf8TomlValidAlreadyEscaped(str)
		if verr.Zero() {
			return token, str, rest, nil
		}
		return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
	}

	i := startIdx

	var builder bytes.Buffer

	// The scanner ensures that the token starts and ends with quotes and that
	// escapes are balanced.
	for i < len(token)-1 {
		c := token[i]
		if c == '\\' {
			i++
			c = token[i]

			switch c {
			case '"', '\\':
				builder.WriteByte(c)
			case 'b':
				builder.WriteByte('\b')
			case 'f':
				builder.WriteByte('\f')
			case 'n':
				builder.WriteByte('\n')
			case 'r':
				builder.WriteByte('\r')
			case 't':
				builder.WriteByte('\t')
			case 'e':
				// ESC (0x1B) — not in the TOML 1.0 escape set (see
				// toml.abnf in this package); accepted here as an extension.
				builder.WriteByte(0x1B)
			case 'u':
				// \uXXXX: exactly 4 hex digits.
				x, err := hexToRune(token[i+1:len(token)-1], 4)
				if err != nil {
					return nil, nil, nil, err
				}

				builder.WriteRune(x)
				i += 4
			case 'U':
				// \UXXXXXXXX: exactly 8 hex digits.
				x, err := hexToRune(token[i+1:len(token)-1], 8)
				if err != nil {
					return nil, nil, nil, err
				}

				builder.WriteRune(x)
				i += 8
			default:
				return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c)
			}
			i++
		} else {
			// Plain content: copy one rune, validating UTF-8 as we go.
			size := utf8ValidNext(token[i:])
			if size == 0 {
				return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c)
			}
			builder.Write(token[i : i+size])
			i += size
		}
	}

	return token, builder.Bytes(), rest, nil
}
+
+func hexToRune(b []byte, length int) (rune, error) {
+ if len(b) < length {
+ return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b))
+ }
+ b = b[:length]
+
+ var r uint32
+ for i, c := range b {
+ d := uint32(0)
+ switch {
+ case '0' <= c && c <= '9':
+ d = uint32(c - '0')
+ case 'a' <= c && c <= 'f':
+ d = uint32(c - 'a' + 10)
+ case 'A' <= c && c <= 'F':
+ d = uint32(c - 'A' + 10)
+ default:
+ return -1, newDecodeError(b[i:i+1], "non-hex character")
+ }
+ r = r*16 + d
+ }
+
+ if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 {
+ return -1, newDecodeError(b, "escape sequence is invalid Unicode code point")
+ }
+
+ return rune(r), nil
+}
+
// parseWhitespace consumes any run of spaces and tabs and returns the
// remaining input. It never fails.
func (p *parser) parseWhitespace(b []byte) []byte {
	// ws = *wschar
	// wschar = %x20 ; Space
	// wschar =/ %x09 ; Horizontal tab
	_, rest := scanWhitespace(b)

	return rest
}
+
// parseIntOrFloatOrDateTime disambiguates between numbers and date-times
// by inspecting the first few bytes, then delegates to the matching
// scanner.
//nolint:cyclop
func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) {
	switch b[0] {
	case 'i':
		// Special float: inf.
		if !scanFollowsInf(b) {
			return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'")
		}

		return p.builder.Push(ast.Node{
			Kind: ast.Float,
			Data: b[:3],
		}), b[3:], nil
	case 'n':
		// Special float: nan.
		if !scanFollowsNan(b) {
			return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'")
		}

		return p.builder.Push(ast.Node{
			Kind: ast.Float,
			Data: b[:3],
		}), b[3:], nil
	case '+', '-':
		// A leading sign can only start a number.
		return p.scanIntOrFloat(b)
	}

	// Too short to be a date (YYYY-) or time (HH:); must be a number.
	if len(b) < 3 {
		return p.scanIntOrFloat(b)
	}

	s := 5
	if len(b) < s {
		s = len(b)
	}

	// Scan the leading digits: a ':' at index 2 indicates a time and a
	// '-' at index 4 indicates a date; anything else means a number.
	for idx, c := range b[:s] {
		if isDigit(c) {
			continue
		}

		if idx == 2 && c == ':' || (idx == 4 && c == '-') {
			return p.scanDateTime(b)
		}

		break
	}

	return p.scanIntOrFloat(b)
}
+
// scanDateTime delimits a date and/or time token and pushes a node whose
// kind (DateTime, LocalDateTime, LocalDate, LocalTime) depends on which
// components were observed. It only delimits the token; full validation
// of the date/time happens later.
func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) {
	// scans for contiguous characters in [0-9T:Z.+-], and up to one space if
	// followed by a digit.
	hasDate := false
	hasTime := false
	hasTz := false
	seenSpace := false

	i := 0
byteLoop:
	for ; i < len(b); i++ {
		c := b[i]

		switch {
		case isDigit(c):
		case c == '-':
			hasDate = true
			// A '-' at offset 8 or later can only belong to a numeric
			// timezone offset (e.g. ...T00:32:00-07:00).
			const minOffsetOfTz = 8
			if i >= minOffsetOfTz {
				hasTz = true
			}
		case c == 'T' || c == 't' || c == ':' || c == '.':
			hasTime = true
		case c == '+' || c == '-' || c == 'Z' || c == 'z':
			hasTz = true
		case c == ' ':
			// A single space separating date and time (RFC 3339 allows
			// it in TOML) is consumed only when a digit follows.
			if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) {
				i += 2
				// Avoid reaching past the end of the document in case the time
				// is malformed. See TestIssue585.
				if i >= len(b) {
					i--
				}
				seenSpace = true
				hasTime = true
			} else {
				break byteLoop
			}
		default:
			break byteLoop
		}
	}

	// Classify based on the components seen while scanning.
	var kind ast.Kind

	if hasTime {
		if hasDate {
			if hasTz {
				kind = ast.DateTime
			} else {
				kind = ast.LocalDateTime
			}
		} else {
			kind = ast.LocalTime
		}
	} else {
		kind = ast.LocalDate
	}

	return p.builder.Push(ast.Node{
		Kind: kind,
		Data: b[:i],
	}), b[i:], nil
}
+
// scanIntOrFloat delimits an integer or float token and pushes the
// corresponding node. The token is classified as Float when it contains
// '.', 'e'/'E', or ends in inf/nan; detailed syntax validation happens
// later.
//nolint:funlen,gocognit,cyclop
func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) {
	i := 0

	// A leading 0 followed by x/o/b starts a hex/octal/binary integer.
	// "0." and "0e..." are floats and fall through to the loop below.
	if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' {
		var isValidRune validRuneFn

		switch b[1] {
		case 'x':
			isValidRune = isValidHexRune
		case 'o':
			isValidRune = isValidOctalRune
		case 'b':
			isValidRune = isValidBinaryRune
		default:
			i++
		}

		// Consume every digit valid in the detected base (underscores
		// included); the token ends at the first invalid byte.
		if isValidRune != nil {
			i += 2
			for ; i < len(b); i++ {
				if !isValidRune(b[i]) {
					break
				}
			}
		}

		return p.builder.Push(ast.Node{
			Kind: ast.Integer,
			Data: b[:i],
		}), b[i:], nil
	}

	isFloat := false

	for ; i < len(b); i++ {
		c := b[i]

		if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' {
			continue
		}

		if c == '.' || c == 'e' || c == 'E' {
			isFloat = true

			continue
		}

		if c == 'i' {
			// Could be the start of -inf / +inf.
			if scanFollowsInf(b[i:]) {
				return p.builder.Push(ast.Node{
					Kind: ast.Float,
					Data: b[:i+3],
				}), b[i+3:], nil
			}

			return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number")
		}

		if c == 'n' {
			// Could be the start of -nan / +nan.
			if scanFollowsNan(b[i:]) {
				return p.builder.Push(ast.Node{
					Kind: ast.Float,
					Data: b[:i+3],
				}), b[i+3:], nil
			}

			return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number")
		}

		break
	}

	if i == 0 {
		return ast.InvalidReference, b, newDecodeError(b, "incomplete number")
	}

	kind := ast.Integer

	if isFloat {
		kind = ast.Float
	}

	return p.builder.Push(ast.Node{
		Kind: kind,
		Data: b[:i],
	}), b[i:], nil
}
+
// isDigit reports whether r is an ASCII decimal digit (0-9). Relies on
// byte arithmetic wrapping: any r below '0' wraps to a large value.
func isDigit(r byte) bool {
	return r-'0' < 10
}
+
// validRuneFn reports whether a byte belongs to a given numeral alphabet
// (hex, octal, or binary, underscores included as digit separators).
type validRuneFn func(r byte) bool

// isValidHexRune reports whether r is a hex digit or underscore.
func isValidHexRune(r byte) bool {
	switch {
	case r >= '0' && r <= '9',
		r >= 'a' && r <= 'f',
		r >= 'A' && r <= 'F',
		r == '_':
		return true
	}
	return false
}

// isValidOctalRune reports whether r is an octal digit or underscore.
func isValidOctalRune(r byte) bool {
	return r == '_' || (r >= '0' && r <= '7')
}

// isValidBinaryRune reports whether r is a binary digit or underscore.
func isValidBinaryRune(r byte) bool {
	switch r {
	case '0', '1', '_':
		return true
	}
	return false
}
+
+func expect(x byte, b []byte) ([]byte, error) {
+ if len(b) == 0 {
+ return nil, newDecodeError(b, "expected character %c but the document ended here", x)
+ }
+
+ if b[0] != x {
+ return nil, newDecodeError(b[0:1], "expected character %c", x)
+ }
+
+ return b[1:], nil
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go
new file mode 100644
index 000000000..bb445fab4
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go
@@ -0,0 +1,269 @@
+package toml
+
// scanFollows reports whether b begins with the given pattern.
func scanFollows(b []byte, pattern string) bool {
	if len(b) < len(pattern) {
		return false
	}

	return string(b[:len(pattern)]) == pattern
}
+
// scanFollowsMultilineBasicStringDelimiter reports whether b starts with """.
func scanFollowsMultilineBasicStringDelimiter(b []byte) bool {
	return scanFollows(b, `"""`)
}

// scanFollowsMultilineLiteralStringDelimiter reports whether b starts with '''.
func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool {
	return scanFollows(b, `'''`)
}

// scanFollowsTrue reports whether b starts with the keyword true.
func scanFollowsTrue(b []byte) bool {
	return scanFollows(b, `true`)
}

// scanFollowsFalse reports whether b starts with the keyword false.
func scanFollowsFalse(b []byte) bool {
	return scanFollows(b, `false`)
}

// scanFollowsInf reports whether b starts with the keyword inf.
func scanFollowsInf(b []byte) bool {
	return scanFollows(b, `inf`)
}

// scanFollowsNan reports whether b starts with the keyword nan.
func scanFollowsNan(b []byte) bool {
	return scanFollows(b, `nan`)
}
+
+func scanUnquotedKey(b []byte) ([]byte, []byte) {
+ // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+ for i := 0; i < len(b); i++ {
+ if !isUnquotedKeyChar(b[i]) {
+ return b[:i], b[i:]
+ }
+ }
+
+ return b, b[len(b):]
+}
+
// isUnquotedKeyChar reports whether r may appear in an unquoted key:
// ASCII letters, digits, '-' and '_'.
func isUnquotedKeyChar(r byte) bool {
	switch {
	case r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z', r >= '0' && r <= '9':
		return true
	case r == '-', r == '_':
		return true
	}
	return false
}
+
// scanLiteralString delimits a single-line literal string ('...'). It
// returns the token including both apostrophes and the remaining input.
// Newlines and invalid UTF-8 inside the string are rejected, as is a
// string left unterminated at end of input.
func scanLiteralString(b []byte) ([]byte, []byte, error) {
	// literal-string = apostrophe *literal-char apostrophe
	// apostrophe = %x27 ; ' apostrophe
	// literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
	for i := 1; i < len(b); {
		switch b[i] {
		case '\'':
			return b[:i+1], b[i+1:], nil
		case '\n', '\r':
			return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines")
		}
		// Advance one rune at a time, validating UTF-8 as we go.
		size := utf8ValidNext(b[i:])
		if size == 0 {
			return nil, nil, newDecodeError(b[i:i+1], "invalid character")
		}
		i += size
	}

	return nil, nil, newDecodeError(b[len(b):], "unterminated literal string")
}
+
// scanMultilineLiteralString delimits a '''...''' string. It returns the
// token including both delimiters and the remaining input. Up to two
// extra apostrophes are allowed at the very end of the body; three in a
// row inside the string are an error, as are bare '\r' and invalid UTF-8.
func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
	// ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
	// ml-literal-string-delim
	// ml-literal-string-delim = 3apostrophe
	// ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
	//
	// mll-content = mll-char / newline
	// mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
	// mll-quotes = 1*2apostrophe
	for i := 3; i < len(b); {
		switch b[i] {
		case '\'':
			if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
				i += 3

				// At that point we found 3 apostrophe, and i is the
				// index of the byte after the third one. The scanner
				// needs to be eager, because there can be an extra 2
				// apostrophe that can be accepted at the end of the
				// string.

				if i >= len(b) || b[i] != '\'' {
					return b[:i], b[i:], nil
				}
				i++

				if i >= len(b) || b[i] != '\'' {
					return b[:i], b[i:], nil
				}
				i++

				if i < len(b) && b[i] == '\'' {
					return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string")
				}

				return b[:i], b[i:], nil
			}
		case '\r':
			// A carriage return is only valid as part of a CRLF pair.
			if len(b) < i+2 {
				return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`)
			}
			if b[i+1] != '\n' {
				return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
			}
			i += 2 // skip the \n
			continue
		}
		// Advance one rune at a time, validating UTF-8 as we go.
		size := utf8ValidNext(b[i:])
		if size == 0 {
			return nil, nil, newDecodeError(b[i:i+1], "invalid character")
		}
		i += size
	}

	return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`)
}
+
+func scanWindowsNewline(b []byte) ([]byte, []byte, error) {
+ const lenCRLF = 2
+ if len(b) < lenCRLF {
+ return nil, nil, newDecodeError(b, "windows new line expected")
+ }
+
+ if b[1] != '\n' {
+ return nil, nil, newDecodeError(b, `windows new line should be \r\n`)
+ }
+
+ return b[:lenCRLF], b[lenCRLF:], nil
+}
+
// scanWhitespace splits b into its leading run of spaces and tabs and
// the remaining input. Either part may be empty.
func scanWhitespace(b []byte) ([]byte, []byte) {
	i := 0
	for i < len(b) && (b[i] == ' ' || b[i] == '\t') {
		i++
	}

	return b[:i], b[i:]
}
+
// scanComment delimits a comment, from the '#' up to but not including
// the terminating '\n'. For a CRLF ending the '\r' is kept inside the
// token and the '\n' left in the rest. A bare '\r' or invalid UTF-8 is
// an error; a comment ended by EOF is valid.
//nolint:unparam
func scanComment(b []byte) ([]byte, []byte, error) {
	// comment-start-symbol = %x23 ; #
	// non-ascii = %x80-D7FF / %xE000-10FFFF
	// non-eol = %x09 / %x20-7F / non-ascii
	//
	// comment = comment-start-symbol *non-eol

	for i := 1; i < len(b); {
		if b[i] == '\n' {
			return b[:i], b[i:], nil
		}
		if b[i] == '\r' {
			if i+1 < len(b) && b[i+1] == '\n' {
				return b[:i+1], b[i+1:], nil
			}
			return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment")
		}
		// Advance one rune at a time, validating UTF-8 as we go.
		size := utf8ValidNext(b[i:])
		if size == 0 {
			return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment")
		}

		i += size
	}

	return b, b[len(b):], nil
}
+
// scanBasicString delimits a single-line basic string ("..."). It
// returns the token including both quotes, whether any escape sequence
// was seen (so the caller knows decoding is needed), the remaining
// input, and any error. Newlines and an unterminated string are errors.
func scanBasicString(b []byte) ([]byte, bool, []byte, error) {
	// basic-string = quotation-mark *basic-char quotation-mark
	// quotation-mark = %x22 ; "
	// basic-char = basic-unescaped / escaped
	// basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
	// escaped = escape escape-seq-char
	escaped := false
	i := 1

	for ; i < len(b); i++ {
		switch b[i] {
		case '"':
			return b[:i+1], escaped, b[i+1:], nil
		case '\n', '\r':
			return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines")
		case '\\':
			if len(b) < i+2 {
				return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\")
			}
			escaped = true
			i++ // skip the next character
		}
	}

	return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`)
}
+
// scanMultilineBasicString delimits a """...""" string. It returns the
// token including both delimiters, whether any escape sequence was seen,
// the remaining input, and any error. Up to two extra quotation marks
// are allowed at the very end of the body; three in a row inside the
// string are an error, as are bare '\r' and a dangling backslash.
func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
	// ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
	// ml-basic-string-delim
	// ml-basic-string-delim = 3quotation-mark
	// ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
	//
	// mlb-content = mlb-char / newline / mlb-escaped-nl
	// mlb-char = mlb-unescaped / escaped
	// mlb-quotes = 1*2quotation-mark
	// mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
	// mlb-escaped-nl = escape ws newline *( wschar / newline )

	escaped := false
	i := 3

	for ; i < len(b); i++ {
		switch b[i] {
		case '"':
			if scanFollowsMultilineBasicStringDelimiter(b[i:]) {
				i += 3

				// At that point we found 3 quotation marks, and i is the
				// index of the byte after the third one. The scanner
				// needs to be eager, because there can be an extra 2
				// quotation marks that can be accepted at the end of the
				// string.

				if i >= len(b) || b[i] != '"' {
					return b[:i], escaped, b[i:], nil
				}
				i++

				if i >= len(b) || b[i] != '"' {
					return b[:i], escaped, b[i:], nil
				}
				i++

				if i < len(b) && b[i] == '"' {
					return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`)
				}

				return b[:i], escaped, b[i:], nil
			}
		case '\\':
			if len(b) < i+2 {
				return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\")
			}
			escaped = true
			i++ // skip the next character
		case '\r':
			// A carriage return is only valid as part of a CRLF pair.
			if len(b) < i+2 {
				return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`)
			}
			if b[i+1] != '\n' {
				return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
			}
			i++ // skip the \n
		}
	}

	return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`)
}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
new file mode 100644
index 000000000..b7830d139
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
@@ -0,0 +1,107 @@
+package toml
+
+import (
+ "github.com/pelletier/go-toml/v2/internal/ast"
+ "github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/internal/tracker"
+)
+
// strict records tables and fields present in the document but absent
// from the destination value, so DisallowUnknownFields can report them.
type strict struct {
	// Enabled turns on tracking; every method below is a no-op when false.
	Enabled bool

	// Tracks the current key being processed.
	key tracker.KeyTracker

	// Accumulated errors for keys with no matching destination.
	missing []decodeError
}
+
+func (s *strict) EnterTable(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.UpdateTable(node)
+}
+
+func (s *strict) EnterArrayTable(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.UpdateArrayTable(node)
+}
+
+func (s *strict) EnterKeyValue(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.Push(node)
+}
+
+func (s *strict) ExitKeyValue(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.key.Pop(node)
+}
+
+func (s *strict) MissingTable(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.missing = append(s.missing, decodeError{
+ highlight: keyLocation(node),
+ message: "missing table",
+ key: s.key.Key(),
+ })
+}
+
+func (s *strict) MissingField(node *ast.Node) {
+ if !s.Enabled {
+ return
+ }
+
+ s.missing = append(s.missing, decodeError{
+ highlight: keyLocation(node),
+ message: "missing field",
+ key: s.key.Key(),
+ })
+}
+
// Error returns a StrictMissingError aggregating every missing table and
// field recorded so far, or nil when strict mode is off or nothing is
// missing. doc is the full document, used to render error locations.
func (s *strict) Error(doc []byte) error {
	if !s.Enabled || len(s.missing) == 0 {
		return nil
	}

	err := &StrictMissingError{
		Errors: make([]DecodeError, 0, len(s.missing)),
	}

	for _, derr := range s.missing {
		derr := derr // copy: wrapDecodeError takes a pointer to the loop variable
		err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr))
	}

	return err
}
+
// keyLocation returns the byte range of the original document covering
// node's whole (possibly dotted) key, from the start of its first part
// through the end of its last. Panics when the node has no key.
func keyLocation(node *ast.Node) []byte {
	k := node.Key()

	hasOne := k.Next()
	if !hasOne {
		panic("should not be called with empty key")
	}

	start := k.Node().Data
	end := k.Node().Data

	// Advance to the last key part; start/end then delimit the full
	// dotted key within the document buffer.
	for k.Next() {
		end = k.Node().Data
	}

	return danger.BytesRange(start, end)
}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/test/integration/vendor/github.com/pelletier/go-toml/v2/toml.abnf
new file mode 100644
index 000000000..473f3749e
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/toml.abnf
@@ -0,0 +1,243 @@
+;; This document describes TOML's syntax, using the ABNF format (defined in
+;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt).
+;;
+;; All valid TOML documents will match this description, however certain
+;; invalid documents would need to be rejected as per the semantics described
+;; in the supporting text description.
+
+;; It is possible to try this grammar interactively, using instaparse.
+;; http://instaparse.mojombo.com/
+;;
+;; To do so, in the lower right, click on Options and change `:input-format` to
+;; ':abnf'. Then paste this entire ABNF document into the grammar entry box
+;; (above the options). Then you can type or paste a sample TOML document into
+;; the beige box on the left. Tada!
+
+;; Overall Structure
+
+toml = expression *( newline expression )
+
+expression = ws [ comment ]
+expression =/ ws keyval ws [ comment ]
+expression =/ ws table ws [ comment ]
+
+;; Whitespace
+
+ws = *wschar
+wschar = %x20 ; Space
+wschar =/ %x09 ; Horizontal tab
+
+;; Newline
+
+newline = %x0A ; LF
+newline =/ %x0D.0A ; CRLF
+
+;; Comment
+
+comment-start-symbol = %x23 ; #
+non-ascii = %x80-D7FF / %xE000-10FFFF
+non-eol = %x09 / %x20-7F / non-ascii
+
+comment = comment-start-symbol *non-eol
+
+;; Key-Value pairs
+
+keyval = key keyval-sep val
+
+key = simple-key / dotted-key
+simple-key = quoted-key / unquoted-key
+
+unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
+quoted-key = basic-string / literal-string
+dotted-key = simple-key 1*( dot-sep simple-key )
+
+dot-sep = ws %x2E ws ; . Period
+keyval-sep = ws %x3D ws ; =
+
+val = string / boolean / array / inline-table / date-time / float / integer
+
+;; String
+
+string = ml-basic-string / basic-string / ml-literal-string / literal-string
+
+;; Basic String
+
+basic-string = quotation-mark *basic-char quotation-mark
+
+quotation-mark = %x22 ; "
+
+basic-char = basic-unescaped / escaped
+basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+escaped = escape escape-seq-char
+
+escape = %x5C ; \
+escape-seq-char = %x22 ; " quotation mark U+0022
+escape-seq-char =/ %x5C ; \ reverse solidus U+005C
+escape-seq-char =/ %x62 ; b backspace U+0008
+escape-seq-char =/ %x66 ; f form feed U+000C
+escape-seq-char =/ %x6E ; n line feed U+000A
+escape-seq-char =/ %x72 ; r carriage return U+000D
+escape-seq-char =/ %x74 ; t tab U+0009
+escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
+escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
+
+;; Multiline Basic String
+
+ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
+ ml-basic-string-delim
+ml-basic-string-delim = 3quotation-mark
+ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
+
+mlb-content = mlb-char / newline / mlb-escaped-nl
+mlb-char = mlb-unescaped / escaped
+mlb-quotes = 1*2quotation-mark
+mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
+mlb-escaped-nl = escape ws newline *( wschar / newline )
+
+;; Literal String
+
+literal-string = apostrophe *literal-char apostrophe
+
+apostrophe = %x27 ; ' apostrophe
+
+literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
+
+;; Multiline Literal String
+
+ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
+ ml-literal-string-delim
+ml-literal-string-delim = 3apostrophe
+ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
+
+mll-content = mll-char / newline
+mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
+mll-quotes = 1*2apostrophe
+
+;; Integer
+
+integer = dec-int / hex-int / oct-int / bin-int
+
+minus = %x2D ; -
+plus = %x2B ; +
+underscore = %x5F ; _
+digit1-9 = %x31-39 ; 1-9
+digit0-7 = %x30-37 ; 0-7
+digit0-1 = %x30-31 ; 0-1
+
+hex-prefix = %x30.78 ; 0x
+oct-prefix = %x30.6F ; 0o
+bin-prefix = %x30.62 ; 0b
+
+dec-int = [ minus / plus ] unsigned-dec-int
+unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT )
+
+hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG )
+oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 )
+bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 )
+
+;; Float
+
+float = float-int-part ( exp / frac [ exp ] )
+float =/ special-float
+
+float-int-part = dec-int
+frac = decimal-point zero-prefixable-int
+decimal-point = %x2E ; .
+zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT )
+
+exp = "e" float-exp-part
+float-exp-part = [ minus / plus ] zero-prefixable-int
+
+special-float = [ minus / plus ] ( inf / nan )
+inf = %x69.6e.66 ; inf
+nan = %x6e.61.6e ; nan
+
+;; Boolean
+
+boolean = true / false
+
+true = %x74.72.75.65 ; true
+false = %x66.61.6C.73.65 ; false
+
+;; Date and Time (as defined in RFC 3339)
+
+date-time = offset-date-time / local-date-time / local-date / local-time
+
+date-fullyear = 4DIGIT
+date-month = 2DIGIT ; 01-12
+date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
+time-delim = "T" / %x20 ; T, t, or space
+time-hour = 2DIGIT ; 00-23
+time-minute = 2DIGIT ; 00-59
+time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules
+time-secfrac = "." 1*DIGIT
+time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
+time-offset = "Z" / time-numoffset
+
+partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ]
+full-date = date-fullyear "-" date-month "-" date-mday
+full-time = partial-time time-offset
+
+;; Offset Date-Time
+
+offset-date-time = full-date time-delim full-time
+
+;; Local Date-Time
+
+local-date-time = full-date time-delim partial-time
+
+;; Local Date
+
+local-date = full-date
+
+;; Local Time
+
+local-time = partial-time
+
+;; Array
+
+array = array-open [ array-values ] ws-comment-newline array-close
+
+array-open = %x5B ; [
+array-close = %x5D ; ]
+
+array-values = ws-comment-newline val ws-comment-newline array-sep array-values
+array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]
+
+array-sep = %x2C ; , Comma
+
+ws-comment-newline = *( wschar / [ comment ] newline )
+
+;; Table
+
+table = std-table / array-table
+
+;; Standard Table
+
+std-table = std-table-open key std-table-close
+
+std-table-open = %x5B ws ; [ Left square bracket
+std-table-close = ws %x5D ; ] Right square bracket
+
+;; Inline Table
+
+inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
+
+inline-table-open = %x7B ws ; {
+inline-table-close = ws %x7D ; }
+inline-table-sep = ws %x2C ws ; , Comma
+
+inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
+
+;; Array Table
+
+array-table = array-table-open key array-table-close
+
+array-table-open = %x5B.5B ws ; [[ Double left square bracket
+array-table-close = ws %x5D.5D ; ]] Double right square bracket
+
+;; Built-in ABNF terms, reproduced here for clarity
+
+ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
+DIGIT = %x30-39 ; 0-9
+HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
new file mode 100644
index 000000000..630a45466
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
@@ -0,0 +1,14 @@
+package toml
+
+import (
+ "encoding"
+ "reflect"
+ "time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
+var sliceInterfaceType = reflect.TypeOf([]interface{}{})
+var stringType = reflect.TypeOf("")
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
new file mode 100644
index 000000000..d0d7a72d0
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
@@ -0,0 +1,1227 @@
+package toml
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/pelletier/go-toml/v2/internal/ast"
+ "github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/internal/tracker"
+)
+
+// Unmarshal deserializes a TOML document into a Go value.
+//
+// It is a shortcut for Decoder.Decode() with the default options.
+func Unmarshal(data []byte, v interface{}) error {
+ p := parser{}
+ p.Reset(data)
+ d := decoder{p: &p}
+
+ return d.FromParser(v)
+}
+
// Decoder reads and decode a TOML document from an input stream.
type Decoder struct {
	// input stream the TOML document is read from by Decode.
	r io.Reader

	// global settings: when true, unknown keys cause an error (see
	// DisallowUnknownFields).
	strict bool
}
+
+// NewDecoder creates a new Decoder that will read from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the
+// destination is a struct and the input contains a key that does not match a
+// non-ignored field.
+//
+// In that case, the Decoder returns a StrictMissingError that can be used to
+// retrieve the individual errors as well as generate a human readable
+// description of the missing fields.
+func (d *Decoder) DisallowUnknownFields() *Decoder {
+ d.strict = true
+ return d
+}
+
+// Decode the whole content of r into v.
+//
+// By default, values in the document that don't exist in the target Go value
+// are ignored. See Decoder.DisallowUnknownFields() to change this behavior.
+//
+// When a TOML local date, time, or date-time is decoded into a time.Time, its
+// value is represented in time.Local timezone. Otherwise the approriate Local*
+// structure is used. For time values, precision up to the nanosecond is
+// supported by truncating extra digits.
+//
+// Empty tables decoded in an interface{} create an empty initialized
+// map[string]interface{}.
+//
+// Types implementing the encoding.TextUnmarshaler interface are decoded from a
+// TOML string.
+//
+// When decoding a number, go-toml will return an error if the number is out of
+// bounds for the target type (which includes negative numbers when decoding
+// into an unsigned int).
+//
+// If an error occurs while decoding the content of the document, this function
+// returns a toml.DecodeError, providing context about the issue. When using
+// strict mode and a field is missing, a `toml.StrictMissingError` is
+// returned. In any other case, this function returns a standard Go error.
+//
+// # Type mapping
+//
+// List of supported TOML types and their associated accepted Go types:
+//
+// String -> string
+// Integer -> uint*, int*, depending on size
+// Float -> float*, depending on size
+// Boolean -> bool
+// Offset Date-Time -> time.Time
+// Local Date-time -> LocalDateTime, time.Time
+// Local Date -> LocalDate, time.Time
+// Local Time -> LocalTime, time.Time
+// Array -> slice and array, depending on elements types
+// Table -> map and struct
+// Inline Table -> same as Table
+// Array of Tables -> same as Array and Table
+func (d *Decoder) Decode(v interface{}) error {
+ b, err := ioutil.ReadAll(d.r)
+ if err != nil {
+ return fmt.Errorf("toml: %w", err)
+ }
+
+ p := parser{}
+ p.Reset(b)
+ dec := decoder{
+ p: &p,
+ strict: strict{
+ Enabled: d.strict,
+ },
+ }
+
+ return dec.FromParser(v)
+}
+
// decoder holds all the state for one decoding session.
type decoder struct {
	// Which parser instance in use for this decoding session.
	p *parser

	// Flag indicating that the current expression is stashed.
	// If set to true, calling nextExpr will not actually pull a new expression
	// but turn off the flag instead.
	stashedExpr bool

	// Skip expressions until a table is found. This is set to true when a
	// table could not be created (missing field in map), so all KV expressions
	// need to be skipped.
	skipUntilTable bool

	// Tracks position in Go arrays.
	// This is used when decoding [[array tables]] into Go arrays. Given array
	// tables are separate TOML expression, we need to keep track of where we
	// are at in the Go array, as we can't just introspect its size.
	arrayIndexes map[reflect.Value]int

	// Tracks keys that have been seen, with which type.
	seen tracker.SeenTracker

	// Strict mode: records missing fields/tables so FromParser can build a
	// StrictMissingError.
	strict strict

	// Current context for the error: the struct field being decoded, used
	// by typeMismatchError to name the exact field.
	errorContext *errorContext
}
+
// errorContext records which struct field was being decoded when an error
// occurs, so that error messages can name the exact field.
type errorContext struct {
	Struct reflect.Type
	Field []int
}
+
+func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
+ if d.errorContext != nil && d.errorContext.Struct != nil {
+ ctx := d.errorContext
+ f := ctx.Struct.FieldByIndex(ctx.Field)
+ return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
+ }
+ return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target)
+}
+
+func (d *decoder) expr() *ast.Node {
+ return d.p.Expression()
+}
+
+func (d *decoder) nextExpr() bool {
+ if d.stashedExpr {
+ d.stashedExpr = false
+ return true
+ }
+ return d.p.NextExpression()
+}
+
+func (d *decoder) stashExpr() {
+ d.stashedExpr = true
+}
+
+func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int {
+ if d.arrayIndexes == nil {
+ d.arrayIndexes = make(map[reflect.Value]int, 1)
+ }
+
+ idx, ok := d.arrayIndexes[v]
+
+ if !ok {
+ d.arrayIndexes[v] = 0
+ } else if shouldAppend {
+ idx++
+ d.arrayIndexes[v] = idx
+ }
+
+ return idx
+}
+
+func (d *decoder) FromParser(v interface{}) error {
+ r := reflect.ValueOf(v)
+ if r.Kind() != reflect.Ptr {
+ return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind())
+ }
+
+ if r.IsNil() {
+ return fmt.Errorf("toml: decoding pointer target cannot be nil")
+ }
+
+ r = r.Elem()
+ if r.Kind() == reflect.Interface && r.IsNil() {
+ newMap := map[string]interface{}{}
+ r.Set(reflect.ValueOf(newMap))
+ }
+
+ err := d.fromParser(r)
+ if err == nil {
+ return d.strict.Error(d.p.data)
+ }
+
+ var e *decodeError
+ if errors.As(err, &e) {
+ return wrapDecodeError(d.p.data, e)
+ }
+
+ return err
+}
+
+func (d *decoder) fromParser(root reflect.Value) error {
+ for d.nextExpr() {
+ err := d.handleRootExpression(d.expr(), root)
+ if err != nil {
+ return err
+ }
+ }
+
+ return d.p.Error()
+}
+
+/*
+Rules for the unmarshal code:
+
+- The stack is used to keep track of which values need to be set where.
+- handle* functions <=> switch on a given ast.Kind.
+- unmarshalX* functions need to unmarshal a node of kind X.
+- An "object" is either a struct or a map.
+*/
+
// handleRootExpression dispatches one top-level expression (key-value,
// [table] or [[array table]]) into the root Go value v.
func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error {
	var x reflect.Value
	var err error

	// Key-values being skipped belong to a table that could not be
	// created, so they must not be recorded by the seen-tracker either.
	if !(d.skipUntilTable && expr.Kind == ast.KeyValue) {
		err = d.seen.CheckExpression(expr)
		if err != nil {
			return err
		}
	}

	switch expr.Kind {
	case ast.KeyValue:
		if d.skipUntilTable {
			return nil
		}
		x, err = d.handleKeyValue(expr, v)
	case ast.Table:
		// A new table header ends any skipping state.
		d.skipUntilTable = false
		d.strict.EnterTable(expr)
		x, err = d.handleTable(expr.Key(), v)
	case ast.ArrayTable:
		d.skipUntilTable = false
		d.strict.EnterArrayTable(expr)
		x, err = d.handleArrayTable(expr.Key(), v)
	default:
		panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
	}

	if d.skipUntilTable {
		if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable {
			// The table's target field is missing: record it for strict
			// mode before skipping its contents.
			d.strict.MissingTable(expr)
		}
	} else if err == nil && x.IsValid() {
		// The handler produced a replacement for v (e.g. a newly
		// allocated map): install it.
		v.Set(x)
	}

	return err
}
+
+func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+ if key.Next() {
+ return d.handleArrayTablePart(key, v)
+ }
+ return d.handleKeyValues(v)
+}
+
// handleArrayTableCollectionLast handles the final part of an array-table
// key: it makes sure v (or the value behind an interface/pointer) is a
// growable collection, then appends or fills a new element for this
// [[table]].
func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
	switch v.Kind() {
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			// Nothing stored yet: start with a fresh []interface{}.
			elem = reflect.New(sliceInterfaceType).Elem()
			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
		} else if elem.Kind() == reflect.Slice {
			if elem.Type() != sliceInterfaceType {
				// Replace a slice of the wrong type with []interface{}.
				elem = reflect.New(sliceInterfaceType).Elem()
				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
			} else if !elem.CanSet() {
				// Values unpacked from an interface are not settable:
				// work on an addressable copy.
				nelem := reflect.New(sliceInterfaceType).Elem()
				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
				reflect.Copy(nelem, elem)
				elem = nelem
			}
		}
		return d.handleArrayTableCollectionLast(key, elem)
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			elem = ptr.Elem()
		}

		elem, err := d.handleArrayTableCollectionLast(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		v.Elem().Set(elem)

		return v, nil
	case reflect.Slice:
		// Append a new element for this table. Interface elements become
		// map[string]interface{} so keys can be stored into them.
		elemType := v.Type().Elem()
		var elem reflect.Value
		if elemType.Kind() == reflect.Interface {
			elem = makeMapStringInterface()
		} else {
			elem = reflect.New(elemType).Elem()
		}
		elem2, err := d.handleArrayTable(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem2.IsValid() {
			elem = elem2
		}
		return reflect.Append(v, elem), nil
	case reflect.Array:
		// Fixed-size arrays cannot grow: advance the tracked index and
		// fail once it runs past the end.
		idx := d.arrayIndex(true, v)
		if idx >= v.Len() {
			return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx)
		}
		elem := v.Index(idx)
		_, err := d.handleArrayTable(key, elem)
		return v, err
	default:
		return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type())
	}
}
+
// When parsing an array table expression, each part of the key needs to be
// evaluated like a normal key, but if it returns a collection, it also needs to
// point to the last element of the collection. Unless it is the last part of
// the key, then it needs to create a new element at the end.
func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
	if key.IsLast() {
		// Final key part: append a fresh element instead of descending
		// into the existing last one.
		return d.handleArrayTableCollectionLast(key, v)
	}

	switch v.Kind() {
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			elem = ptr.Elem()
		}

		elem, err := d.handleArrayTableCollection(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem.IsValid() {
			v.Elem().Set(elem)
		}

		return v, nil
	case reflect.Slice:
		// Intermediate part over a slice: descend into the element most
		// recently appended.
		elem := v.Index(v.Len() - 1)
		x, err := d.handleArrayTable(key, elem)
		if err != nil || d.skipUntilTable {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			elem.Set(x)
		}

		return v, err
	case reflect.Array:
		idx := d.arrayIndex(false, v)
		if idx >= v.Len() {
			return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx)
		}
		elem := v.Index(idx)
		_, err := d.handleArrayTable(key, elem)
		return v, err
	}

	// Not a collection: treat this part like a normal table key.
	return d.handleArrayTable(key, v)
}
+
// handleKeyPart walks one part of a dotted key into v. nextFn continues
// the walk on the element found (or created); makeFn builds the fresh
// value used when an interface or missing map entry must be materialized.
// The returned value, when valid, is a replacement for v that the caller
// must install.
func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) {
	var rv reflect.Value

	// First, dispatch over v to make sure it is a valid object.
	// There is no guarantee over what it could be.
	switch v.Kind() {
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		elem = v.Elem()
		return d.handleKeyPart(key, elem, nextFn, makeFn)
	case reflect.Map:
		vt := v.Type()

		// Create the key for the map element. Convert to key type.
		mk := reflect.ValueOf(string(key.Node().Data)).Convert(vt.Key())

		// If the map does not exist, create it.
		if v.IsNil() {
			vt := v.Type()
			v = reflect.MakeMap(vt)
			rv = v
		}

		mv := v.MapIndex(mk)
		set := false
		if !mv.IsValid() {
			// If there is no value in the map, create a new one according to
			// the map type. If the element type is interface, create either a
			// map[string]interface{} or a []interface{} depending on whether
			// this is the last part of the array table key.

			t := vt.Elem()
			if t.Kind() == reflect.Interface {
				mv = makeFn()
			} else {
				mv = reflect.New(t).Elem()
			}
			set = true
		} else if mv.Kind() == reflect.Interface {
			mv = mv.Elem()
			if !mv.IsValid() {
				mv = makeFn()
			}
			set = true
		} else if !mv.CanAddr() {
			// Map values are not addressable: mutate an addressable copy
			// and write it back below.
			vt := v.Type()
			t := vt.Elem()
			oldmv := mv
			mv = reflect.New(t).Elem()
			mv.Set(oldmv)
			set = true
		}

		x, err := nextFn(key, mv)
		if err != nil {
			return reflect.Value{}, err
		}

		if x.IsValid() {
			mv = x
			set = true
		}

		if set {
			v.SetMapIndex(mk, mv)
		}
	case reflect.Struct:
		path, found := structFieldPath(v, string(key.Node().Data))
		if !found {
			// No matching field: skip expressions until the next table.
			d.skipUntilTable = true
			return reflect.Value{}, nil
		}

		// Record the field being decoded so type errors can name it.
		if d.errorContext == nil {
			d.errorContext = new(errorContext)
		}
		t := v.Type()
		d.errorContext.Struct = t
		d.errorContext.Field = path

		f := fieldByIndex(v, path)
		x, err := nextFn(key, f)
		if err != nil || d.skipUntilTable {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			f.Set(x)
		}
		d.errorContext.Field = nil
		d.errorContext.Struct = nil
	case reflect.Interface:
		if v.Elem().IsValid() {
			v = v.Elem()
		} else {
			v = makeMapStringInterface()
		}

		x, err := d.handleKeyPart(key, v, nextFn, makeFn)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			v = x
		}
		rv = v
	default:
		panic(fmt.Errorf("unhandled part: %s", v.Kind()))
	}

	return rv, nil
}
+
+// HandleArrayTablePart navigates the Go structure v using the key v. It is
+// only used for the prefix (non-last) parts of an array-table. When
+// encountering a collection, it should go to the last element.
+func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+ var makeFn valueMakerFn
+ if key.IsLast() {
+ makeFn = makeSliceInterface
+ } else {
+ makeFn = makeMapStringInterface
+ }
+ return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn)
+}
+
+// HandleTable returns a reference when it has checked the next expression but
+// cannot handle it.
+func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+ if v.Kind() == reflect.Slice {
+ if v.Len() == 0 {
+ return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice")
+ }
+ elem := v.Index(v.Len() - 1)
+ x, err := d.handleTable(key, elem)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x.IsValid() {
+ elem.Set(x)
+ }
+ return reflect.Value{}, nil
+ }
+ if key.Next() {
+ // Still scoping the key
+ return d.handleTablePart(key, v)
+ }
+ // Done scoping the key.
+ // Now handle all the key-value expressions in this table.
+ return d.handleKeyValues(v)
+}
+
// Handle root expressions until the end of the document or the next
// non-key-value.
func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
	var rv reflect.Value
	for d.nextExpr() {
		expr := d.expr()
		if expr.Kind != ast.KeyValue {
			// Stash the expression so that fromParser can just loop and use
			// the right handler.
			// We could just recurse ourselves here, but at least this gives a
			// chance to pop the stack a bit.
			d.stashExpr()
			break
		}

		err := d.seen.CheckExpression(expr)
		if err != nil {
			return reflect.Value{}, err
		}

		x, err := d.handleKeyValue(expr, v)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			// The handler replaced v (e.g. allocated a map): keep using
			// the replacement and report it to the caller.
			v = x
			rv = x
		}
	}
	return rv, nil
}
+
type (
	// handlerFn continues the walk of a key on a given Go value.
	handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error)
	// valueMakerFn materializes a fresh container value.
	valueMakerFn func() reflect.Value
)

// makeMapStringInterface returns a new empty map[string]interface{}.
func makeMapStringInterface() reflect.Value {
	return reflect.MakeMap(mapStringInterfaceType)
}

// makeSliceInterface returns a new empty []interface{} with spare capacity.
func makeSliceInterface() reflect.Value {
	return reflect.MakeSlice(sliceInterfaceType, 0, 16)
}

// handleTablePart navigates one part of a standard-table key.
func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
	return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface)
}
+
// tryTextUnmarshaler attempts to decode the node through the target's
// encoding.TextUnmarshaler implementation. It reports whether the value
// was handled that way.
func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) {
	// Special case for time, because we allow to unmarshal to it from
	// different kind of AST nodes.
	if v.Type() == timeType {
		return false, nil
	}

	if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
		err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
		if err != nil {
			return false, newDecodeError(d.p.Raw(node.Raw), "%w", err)
		}

		return true, nil
	}

	return false, nil
}
+
// handleValue decodes the value node into v: pointers are dereferenced
// (allocating as needed), encoding.TextUnmarshaler gets first chance,
// then the decoder dispatches on the TOML value kind.
func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error {
	for v.Kind() == reflect.Ptr {
		v = initAndDereferencePointer(v)
	}

	ok, err := d.tryTextUnmarshaler(value, v)
	if ok || err != nil {
		return err
	}

	switch value.Kind {
	case ast.String:
		return d.unmarshalString(value, v)
	case ast.Integer:
		return d.unmarshalInteger(value, v)
	case ast.Float:
		return d.unmarshalFloat(value, v)
	case ast.Bool:
		return d.unmarshalBool(value, v)
	case ast.DateTime:
		return d.unmarshalDateTime(value, v)
	case ast.LocalDate:
		return d.unmarshalLocalDate(value, v)
	case ast.LocalTime:
		return d.unmarshalLocalTime(value, v)
	case ast.LocalDateTime:
		return d.unmarshalLocalDateTime(value, v)
	case ast.InlineTable:
		return d.unmarshalInlineTable(value, v)
	case ast.Array:
		return d.unmarshalArray(value, v)
	default:
		panic(fmt.Errorf("handleValue not implemented for %s", value.Kind))
	}
}
+
// unmarshalArray decodes a TOML array node into v, which must be a slice,
// an array, or an interface. Slices are reset then appended to; arrays
// are filled up to their length, silently ignoring extra elements.
func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error {
	switch v.Kind() {
	case reflect.Slice:
		if v.IsNil() {
			v.Set(reflect.MakeSlice(v.Type(), 0, 16))
		} else {
			// Reuse the existing backing array.
			v.SetLen(0)
		}
	case reflect.Array:
		// arrays are always initialized
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			elem = reflect.New(sliceInterfaceType).Elem()
			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
		} else if elem.Kind() == reflect.Slice {
			if elem.Type() != sliceInterfaceType {
				elem = reflect.New(sliceInterfaceType).Elem()
				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
			} else if !elem.CanSet() {
				// Values unpacked from an interface are not settable:
				// copy into an addressable slice first.
				nelem := reflect.New(sliceInterfaceType).Elem()
				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
				reflect.Copy(nelem, elem)
				elem = nelem
			}
		}
		err := d.unmarshalArray(array, elem)
		if err != nil {
			return err
		}
		v.Set(elem)
		return nil
	default:
		// TODO: use newDecodeError, but first the parser needs to fill
		// array.Data.
		return d.typeMismatchError("array", v.Type())
	}

	elemType := v.Type().Elem()

	it := array.Children()
	idx := 0
	for it.Next() {
		n := it.Node()

		// TODO: optimize
		if v.Kind() == reflect.Slice {
			elem := reflect.New(elemType).Elem()

			err := d.handleValue(n, elem)
			if err != nil {
				return err
			}

			v.Set(reflect.Append(v, elem))
		} else { // array
			if idx >= v.Len() {
				return nil
			}
			elem := v.Index(idx)
			err := d.handleValue(n, elem)
			if err != nil {
				return err
			}
			idx++
		}
	}

	return nil
}
+
// unmarshalInlineTable decodes a TOML inline table node into v, which
// must be a map, a struct, or an interface (filled with a fresh
// map[string]interface{}).
func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error {
	// Make sure v is an initialized object.
	switch v.Kind() {
	case reflect.Map:
		if v.IsNil() {
			v.Set(reflect.MakeMap(v.Type()))
		}
	case reflect.Struct:
		// structs are always initialized.
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			elem = makeMapStringInterface()
			v.Set(elem)
		}
		return d.unmarshalInlineTable(itable, elem)
	default:
		return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind())
	}

	// Decode each key-value pair of the inline table into v.
	it := itable.Children()
	for it.Next() {
		n := it.Node()

		x, err := d.handleKeyValue(n, v)
		if err != nil {
			return err
		}
		if x.IsValid() {
			v = x
		}
	}

	return nil
}
+
+func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error {
+ dt, err := parseDateTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ v.Set(reflect.ValueOf(dt))
+ return nil
+}
+
+func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error {
+ ld, err := parseLocalDate(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if v.Type() == timeType {
+ cast := ld.AsTime(time.Local)
+ v.Set(reflect.ValueOf(cast))
+ return nil
+ }
+
+ v.Set(reflect.ValueOf(ld))
+
+ return nil
+}
+
+func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error {
+ lt, rest, err := parseLocalTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if len(rest) > 0 {
+ return newDecodeError(rest, "extra characters at the end of a local time")
+ }
+
+ v.Set(reflect.ValueOf(lt))
+ return nil
+}
+
+func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error {
+ ldt, rest, err := parseLocalDateTime(value.Data)
+ if err != nil {
+ return err
+ }
+
+ if len(rest) > 0 {
+ return newDecodeError(rest, "extra characters at the end of a local date time")
+ }
+
+ if v.Type() == timeType {
+ cast := ldt.AsTime(time.Local)
+
+ v.Set(reflect.ValueOf(cast))
+ return nil
+ }
+
+ v.Set(reflect.ValueOf(ldt))
+
+ return nil
+}
+
+func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error {
+ b := value.Data[0] == 't'
+
+ switch v.Kind() {
+ case reflect.Bool:
+ v.SetBool(b)
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(b))
+ default:
+ return newDecodeError(value.Data, "cannot assign boolean to a %t", b)
+ }
+
+ return nil
+}
+
+func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
+ f, err := parseFloat(value.Data)
+ if err != nil {
+ return err
+ }
+
+ switch v.Kind() {
+ case reflect.Float64:
+ v.SetFloat(f)
+ case reflect.Float32:
+ if f > math.MaxFloat32 {
+ return newDecodeError(value.Data, "number %f does not fit in a float32", f)
+ }
+ v.SetFloat(f)
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(f))
+ default:
+ return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind())
+ }
+
+ return nil
+}
+
// Bounds of the platform-native int type, derived from the word size.
const (
	maxInt = int64(^uint(0) >> 1)
	minInt = -maxInt - 1
)

// Maximum value of uint for decoding. Currently the decoder parses the integer
// into an int64. As a result, on architectures where uint is 64 bits, the
// effective maximum uint we can decode is the maximum of int64. On
// architectures where uint is 32 bits, the maximum value we can decode is
// lower: the maximum of uint32. I didn't find a way to figure out this value at
// compile time, so it is computed during initialization.
var maxUint int64 = math.MaxInt64

func init() {
	// On 32-bit platforms ^uint(0) is smaller than math.MaxInt64: clamp
	// maxUint down to the platform's actual uint range.
	m := uint64(^uint(0))
	if m < uint64(maxUint) {
		maxUint = int64(m)
	}
}
+
+func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
+ i, err := parseInteger(value.Data)
+ if err != nil {
+ return err
+ }
+
+ var r reflect.Value
+
+ switch v.Kind() {
+ case reflect.Int64:
+ v.SetInt(i)
+ return nil
+ case reflect.Int32:
+ if i < math.MinInt32 || i > math.MaxInt32 {
+ return fmt.Errorf("toml: number %d does not fit in an int32", i)
+ }
+
+ r = reflect.ValueOf(int32(i))
+ case reflect.Int16:
+ if i < math.MinInt16 || i > math.MaxInt16 {
+ return fmt.Errorf("toml: number %d does not fit in an int16", i)
+ }
+
+ r = reflect.ValueOf(int16(i))
+ case reflect.Int8:
+ if i < math.MinInt8 || i > math.MaxInt8 {
+ return fmt.Errorf("toml: number %d does not fit in an int8", i)
+ }
+
+ r = reflect.ValueOf(int8(i))
+ case reflect.Int:
+ if i < minInt || i > maxInt {
+ return fmt.Errorf("toml: number %d does not fit in an int", i)
+ }
+
+ r = reflect.ValueOf(int(i))
+ case reflect.Uint64:
+ if i < 0 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint64", i)
+ }
+
+ r = reflect.ValueOf(uint64(i))
+ case reflect.Uint32:
+ if i < 0 || i > math.MaxUint32 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint32", i)
+ }
+
+ r = reflect.ValueOf(uint32(i))
+ case reflect.Uint16:
+ if i < 0 || i > math.MaxUint16 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint16", i)
+ }
+
+ r = reflect.ValueOf(uint16(i))
+ case reflect.Uint8:
+ if i < 0 || i > math.MaxUint8 {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint8", i)
+ }
+
+ r = reflect.ValueOf(uint8(i))
+ case reflect.Uint:
+ if i < 0 || i > maxUint {
+ return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
+ }
+
+ r = reflect.ValueOf(uint(i))
+ case reflect.Interface:
+ r = reflect.ValueOf(i)
+ default:
+ return d.typeMismatchError("integer", v.Type())
+ }
+
+ if !r.Type().AssignableTo(v.Type()) {
+ r = r.Convert(v.Type())
+ }
+
+ v.Set(r)
+
+ return nil
+}
+
+func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.String:
+ v.SetString(string(value.Data))
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(string(value.Data)))
+ default:
+ return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind())
+ }
+
+ return nil
+}
+
// handleKeyValue decodes one key-value expression into v, keeping the
// strict-mode tracker informed so missing fields can be reported.
func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) {
	d.strict.EnterKeyValue(expr)

	v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v)
	if d.skipUntilTable {
		// The key matched no field: record it for strict mode and resume
		// normal decoding with the next expression.
		d.strict.MissingField(expr)
		d.skipUntilTable = false
	}

	d.strict.ExitKeyValue(expr)

	return v, err
}
+
+func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) {
+ if key.Next() {
+ // Still scoping the key
+ return d.handleKeyValuePart(key, value, v)
+ }
+ // Done scoping the key.
+ // v is whatever Go value we need to fill.
+ return reflect.Value{}, d.handleValue(value, v)
+}
+
// handleKeyValuePart walks one part of a (possibly dotted) key-value key
// into v. The returned value, when valid, is a replacement for v that the
// caller must install.
func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) {
	// contains the replacement for v
	var rv reflect.Value

	// First, dispatch over v to make sure it is a valid object.
	// There is no guarantee over what it could be.
	switch v.Kind() {
	case reflect.Map:
		vt := v.Type()

		mk := reflect.ValueOf(string(key.Node().Data))
		mkt := stringType

		// Convert the string key to the map's key type when needed.
		keyType := vt.Key()
		if !mkt.AssignableTo(keyType) {
			if !mkt.ConvertibleTo(keyType) {
				return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", mkt, keyType)
			}

			mk = mk.Convert(keyType)
		}

		// If the map does not exist, create it.
		if v.IsNil() {
			v = reflect.MakeMap(vt)
			rv = v
		}

		mv := v.MapIndex(mk)
		set := false
		if !mv.IsValid() {
			set = true
			mv = reflect.New(v.Type().Elem()).Elem()
		} else {
			if key.IsLast() {
				// Existing entry for the final key part: decode into a
				// fresh interface so the old value's type does not
				// constrain the new one.
				var x interface{}
				mv = reflect.ValueOf(&x).Elem()
				set = true
			}
		}

		nv, err := d.handleKeyValueInner(key, value, mv)
		if err != nil {
			return reflect.Value{}, err
		}
		if nv.IsValid() {
			mv = nv
			set = true
		}

		if set {
			v.SetMapIndex(mk, mv)
		}
	case reflect.Struct:
		path, found := structFieldPath(v, string(key.Node().Data))
		if !found {
			// No matching field: the caller records the missing field.
			d.skipUntilTable = true
			break
		}

		// Record the field being decoded so type errors can name it.
		if d.errorContext == nil {
			d.errorContext = new(errorContext)
		}
		t := v.Type()
		d.errorContext.Struct = t
		d.errorContext.Field = path

		f := fieldByIndex(v, path)
		x, err := d.handleKeyValueInner(key, value, f)
		if err != nil {
			return reflect.Value{}, err
		}

		if x.IsValid() {
			f.Set(x)
		}
		d.errorContext.Struct = nil
		d.errorContext.Field = nil
	case reflect.Interface:
		v = v.Elem()

		// Following encoding/json: decoding an object into an
		// interface{}, it needs to always hold a
		// map[string]interface{}. This is for the types to be
		// consistent whether a previous value was set or not.
		if !v.IsValid() || v.Type() != mapStringInterfaceType {
			v = makeMapStringInterface()
		}

		x, err := d.handleKeyValuePart(key, value, v)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			v = x
		}
		rv = v
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			rv = v
			elem = ptr.Elem()
		}

		elem2, err := d.handleKeyValuePart(key, value, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem2.IsValid() {
			elem = elem2
		}
		v.Elem().Set(elem)
	default:
		return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind())
	}

	return rv, nil
}
+
+func initAndDereferencePointer(v reflect.Value) reflect.Value {
+ var elem reflect.Value
+ if v.IsNil() {
+ ptr := reflect.New(v.Type().Elem())
+ v.Set(ptr)
+ }
+ elem = v.Elem()
+ return elem
+}
+
+// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
+func fieldByIndex(v reflect.Value, path []int) reflect.Value {
+ for i, x := range path {
+ v = v.Field(x)
+
+ if i < len(path)-1 && v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
// fieldPathsMap maps a field's TOML name (and its lowercase variant) to
// the field's index path within a struct type.
type fieldPathsMap = map[string][]int

// globalFieldPathsCache caches field paths per struct type. It holds a
// map[danger.TypeID]fieldPathsMap that is replaced wholesale on insert
// (copy-on-write), so concurrent readers never need a lock.
var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
+
// structFieldPath resolves the index path of the struct field that should
// receive the TOML key name, preferring an exact match over a
// case-insensitive one. Paths are computed once per struct type and
// cached in globalFieldPathsCache.
func structFieldPath(v reflect.Value, name string) ([]int, bool) {
	t := v.Type()

	cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
	fieldPaths, ok := cache[danger.MakeTypeID(t)]

	if !ok {
		fieldPaths = map[string][]int{}

		forEachField(t, nil, func(name string, path []int) {
			fieldPaths[name] = path
			// extra copy for the case-insensitive match
			fieldPaths[strings.ToLower(name)] = path
		})

		// Publish a copy of the cache extended with this type. Concurrent
		// misses may recompute the same entry, which is harmless.
		newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
		newCache[danger.MakeTypeID(t)] = fieldPaths
		for k, v := range cache {
			newCache[k] = v
		}
		globalFieldPathsCache.Store(newCache)
	}

	path, ok := fieldPaths[name]
	if !ok {
		path, ok = fieldPaths[strings.ToLower(name)]
	}
	return path, ok
}
+
// forEachField walks the exported fields of struct type t, recursing into
// unnamed anonymous embedded structs, and calls do with each field's
// effective TOML name and index path relative to t.
func forEachField(t reflect.Type, path []int, do func(name string, path []int)) {
	n := t.NumField()
	for i := 0; i < n; i++ {
		f := t.Field(i)

		if !f.Anonymous && f.PkgPath != "" {
			// only consider exported fields.
			continue
		}

		// Re-slice with a full slice expression so appends at deeper
		// recursion levels cannot share this backing array.
		fieldPath := append(path, i)
		fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]

		name := f.Tag.Get("toml")
		if name == "-" {
			// Field explicitly excluded by its tag.
			continue
		}

		if i := strings.IndexByte(name, ','); i >= 0 {
			// Drop tag options such as ",omitempty".
			name = name[:i]
		}

		if f.Anonymous && name == "" {
			// Untagged embedded struct (or pointer to one): flatten its
			// fields into the parent.
			t2 := f.Type
			if t2.Kind() == reflect.Ptr {
				t2 = t2.Elem()
			}

			if t2.Kind() == reflect.Struct {
				forEachField(t2, fieldPath, do)
			}
			continue
		}

		if name == "" {
			name = f.Name
		}

		do(name, fieldPath)
	}
}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go
new file mode 100644
index 000000000..d47a4f20c
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go
@@ -0,0 +1,240 @@
+package toml
+
+import (
+ "unicode/utf8"
+)
+
+type utf8Err struct {
+ Index int
+ Size int
+}
+
+func (u utf8Err) Zero() bool {
+ return u.Size == 0
+}
+
+// Verified that a given string is only made of valid UTF-8 characters allowed
+// by the TOML spec:
+//
+// Any Unicode character may be used except those that must be escaped:
+// quotation mark, backslash, and the control characters other than tab (U+0000
+// to U+0008, U+000A to U+001F, U+007F).
+//
+// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early
+// when a character is not allowed.
+//
+// The returned utf8Err is Zero() if the string is valid, or contains the byte
+// index and size of the invalid character.
+//
+// quotation mark => already checked
+// backslash => already checked
+// 0-0x8 => invalid
+// 0x9 => tab, ok
+// 0xA - 0x1F => invalid
+// 0x7F => invalid
+func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
+ // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
+ offset := 0
+ for len(p) >= 8 {
+ // Combining two 32 bit loads allows the same code to be used
+ // for 32 and 64 bit platforms.
+ // The compiler can generate a 32bit load for first32 and second32
+ // on many platforms. See test/codegen/memcombine.go.
+ first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+ second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
+ if (first32|second32)&0x80808080 != 0 {
+ // Found a non ASCII byte (>= RuneSelf).
+ break
+ }
+
+ for i, b := range p[:8] {
+ if invalidAscii(b) {
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ }
+
+ p = p[8:]
+ offset += 8
+ }
+ n := len(p)
+ for i := 0; i < n; {
+ pi := p[i]
+ if pi < utf8.RuneSelf {
+ if invalidAscii(pi) {
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ i++
+ continue
+ }
+ x := first[pi]
+ if x == xx {
+ // Illegal starter byte.
+ err.Index = offset + i
+ err.Size = 1
+ return
+ }
+ size := int(x & 7)
+ if i+size > n {
+ // Short or invalid.
+ err.Index = offset + i
+ err.Size = n - i
+ return
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[i+1]; c < accept.lo || accept.hi < c {
+ err.Index = offset + i
+ err.Size = 2
+ return
+ } else if size == 2 {
+ } else if c := p[i+2]; c < locb || hicb < c {
+ err.Index = offset + i
+ err.Size = 3
+ return
+ } else if size == 3 {
+ } else if c := p[i+3]; c < locb || hicb < c {
+ err.Index = offset + i
+ err.Size = 4
+ return
+ }
+ i += size
+ }
+ return
+}
+
+// Return the size of the next rune if valid, 0 otherwise.
+func utf8ValidNext(p []byte) int {
+ c := p[0]
+
+ if c < utf8.RuneSelf {
+ if invalidAscii(c) {
+ return 0
+ }
+ return 1
+ }
+
+ x := first[c]
+ if x == xx {
+ // Illegal starter byte.
+ return 0
+ }
+ size := int(x & 7)
+ if size > len(p) {
+ // Short or invalid.
+ return 0
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[1]; c < accept.lo || accept.hi < c {
+ return 0
+ } else if size == 2 {
+ } else if c := p[2]; c < locb || hicb < c {
+ return 0
+ } else if size == 3 {
+ } else if c := p[3]; c < locb || hicb < c {
+ return 0
+ }
+
+ return size
+}
+
+var invalidAsciiTable = [256]bool{
+ 0x00: true,
+ 0x01: true,
+ 0x02: true,
+ 0x03: true,
+ 0x04: true,
+ 0x05: true,
+ 0x06: true,
+ 0x07: true,
+ 0x08: true,
+ // 0x09 TAB
+ // 0x0A LF
+ 0x0B: true,
+ 0x0C: true,
+ // 0x0D CR
+ 0x0E: true,
+ 0x0F: true,
+ 0x10: true,
+ 0x11: true,
+ 0x12: true,
+ 0x13: true,
+ 0x14: true,
+ 0x15: true,
+ 0x16: true,
+ 0x17: true,
+ 0x18: true,
+ 0x19: true,
+ 0x1A: true,
+ 0x1B: true,
+ 0x1C: true,
+ 0x1D: true,
+ 0x1E: true,
+ 0x1F: true,
+ // 0x20 - 0x7E Printable ASCII characters
+ 0x7F: true,
+}
+
+func invalidAscii(b byte) bool {
+ return invalidAsciiTable[b]
+}
+
+// acceptRange gives the range of valid values for the second byte in a UTF-8
+// sequence.
+type acceptRange struct {
+ lo uint8 // lowest value for second byte.
+ hi uint8 // highest value for second byte.
+}
+
+// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
+var acceptRanges = [16]acceptRange{
+ 0: {locb, hicb},
+ 1: {0xA0, hicb},
+ 2: {locb, 0x9F},
+ 3: {0x90, hicb},
+ 4: {locb, 0x8F},
+}
+
+// first is information about the first byte in a UTF-8 sequence.
+var first = [256]uint8{
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+ xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+ s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+ s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+ s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+const (
+ // The default lowest and highest continuation byte.
+ locb = 0b10000000
+ hicb = 0b10111111
+
+ // These names of these constants are chosen to give nice alignment in the
+ // table below. The first nibble is an index into acceptRanges or F for
+ // special one-byte cases. The second nibble is the Rune length or the
+ // Status for the special one-byte case.
+ xx = 0xF1 // invalid: size 1
+ as = 0xF0 // ASCII: size 1
+ s1 = 0x02 // accept 0, size 2
+ s2 = 0x13 // accept 1, size 3
+ s3 = 0x03 // accept 0, size 3
+ s4 = 0x23 // accept 2, size 3
+ s5 = 0x34 // accept 3, size 4
+ s6 = 0x04 // accept 0, size 4
+ s7 = 0x44 // accept 4, size 4
+)
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/LICENSE b/test/integration/vendor/github.com/power-devops/perfstat/LICENSE
new file mode 100644
index 000000000..ec4e5d39d
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2020 Power DevOps
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.c b/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.c
new file mode 100644
index 000000000..49ba1ad7e
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.c
@@ -0,0 +1,159 @@
+#include "c_helpers.h"
+
+GETFUNC(cpu)
+GETFUNC(disk)
+GETFUNC(diskadapter)
+GETFUNC(diskpath)
+GETFUNC(fcstat)
+GETFUNC(logicalvolume)
+GETFUNC(memory_page)
+GETFUNC(netadapter)
+GETFUNC(netbuffer)
+GETFUNC(netinterface)
+GETFUNC(pagingspace)
+GETFUNC(process)
+GETFUNC(thread)
+GETFUNC(volumegroup)
+
+double get_partition_mhz(perfstat_partition_config_t pinfo) {
+ return pinfo.processorMHz;
+}
+
+char *get_ps_hostname(perfstat_pagingspace_t *ps) {
+ return ps->u.nfs_paging.hostname;
+}
+
+char *get_ps_filename(perfstat_pagingspace_t *ps) {
+ return ps->u.nfs_paging.filename;
+}
+
+char *get_ps_vgname(perfstat_pagingspace_t *ps) {
+ return ps->u.lv_paging.vgname;
+}
+
+time_t boottime()
+{
+ register struct utmpx *utmp;
+
+ setutxent();
+ while ( (utmp = getutxent()) != NULL ) {
+ if (utmp->ut_type == BOOT_TIME) {
+ return utmp->ut_tv.tv_sec;
+ }
+ }
+ endutxent();
+ return -1;
+}
+
+struct fsinfo *get_filesystem_stat(struct fsinfo *fs_all, int n) {
+ if (!fs_all) return NULL;
+ return &(fs_all[n]);
+}
+
+int get_mounts(struct vmount **vmountpp) {
+ int size;
+ struct vmount *vm;
+ int nmounts;
+
+ size = BUFSIZ;
+
+ while (1) {
+ if ((vm = (struct vmount *)malloc((size_t)size)) == NULL) {
+ perror("malloc failed");
+ exit(-1);
+ }
+ if ((nmounts = mntctl(MCTL_QUERY, size, (caddr_t)vm)) > 0) {
+ *vmountpp = vm;
+ return nmounts;
+ } else if (nmounts == 0) {
+ size = *(int *)vm;
+ free((void *)vm);
+ } else {
+ free((void *)vm);
+ return -1;
+ }
+ }
+}
+
+void fill_fsinfo(struct statfs statbuf, struct fsinfo *fs) {
+ fsblkcnt_t freeblks, totblks, usedblks;
+ fsblkcnt_t tinodes, ninodes, ifree;
+ uint cfactor;
+
+ if (statbuf.f_blocks == -1) {
+ fs->totalblks = 0;
+ fs->freeblks = 0;
+ fs->totalinodes = 0;
+ fs->freeinodes = 0;
+ return;
+ }
+
+ cfactor = statbuf.f_bsize / 512;
+ fs->freeblks = statbuf.f_bavail * cfactor;
+ fs->totalblks = statbuf.f_blocks * cfactor;
+
+ fs->freeinodes = statbuf.f_ffree;
+ fs->totalinodes = statbuf.f_files;
+
+ if (fs->freeblks < 0)
+ fs->freeblks = 0;
+}
+
+int getfsinfo(char *fsname, char *devname, char *host, char *options, int flags, int fstype, struct fsinfo *fs) {
+ struct statfs statbuf;
+ int devname_size = strlen(devname);
+ int fsname_size = strlen(fsname);
+ char buf[BUFSIZ];
+ char *p;
+
+ if (fs == NULL) {
+ return 1;
+ }
+
+ for (p = strtok(options, ","); p != NULL; p = strtok(NULL, ","))
+ if (strcmp(p, "ignore") == 0)
+ return 0;
+
+ if (*host != 0 && strcmp(host, "-") != 0) {
+ sprintf(buf, "%s:%s", host, devname);
+ devname = buf;
+ }
+ fs->devname = (char *)calloc(devname_size+1, 1);
+ fs->fsname = (char *)calloc(fsname_size+1, 1);
+ strncpy(fs->devname, devname, devname_size);
+ strncpy(fs->fsname, fsname, fsname_size);
+ fs->flags = flags;
+ fs->fstype = fstype;
+
+ if (statfs(fsname,&statbuf) < 0) {
+ return 1;
+ }
+
+ fill_fsinfo(statbuf, fs);
+ return 0;
+}
+
+struct fsinfo *get_all_fs(int *rc) {
+ struct vmount *mnt;
+ struct fsinfo *fs_all;
+ int nmounts;
+
+ *rc = -1;
+ if ((nmounts = get_mounts(&mnt)) <= 0) {
+ perror("Can't get mount table info");
+ return NULL;
+ }
+
+ fs_all = (struct fsinfo *)calloc(sizeof(struct fsinfo), nmounts);
+ while ((*rc)++, nmounts--) {
+ getfsinfo(vmt2dataptr(mnt, VMT_STUB),
+ vmt2dataptr(mnt, VMT_OBJECT),
+ vmt2dataptr(mnt, VMT_HOST),
+ vmt2dataptr(mnt, VMT_ARGS),
+ mnt->vmt_flags,
+ mnt->vmt_gfstype,
+ &fs_all[*rc]);
+ mnt = (struct vmount *)((char *)mnt + mnt->vmt_length);
+ }
+ return fs_all;
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.h b/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.h
new file mode 100644
index 000000000..b66bc53c3
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/c_helpers.h
@@ -0,0 +1,58 @@
+#ifndef C_HELPERS_H
+#define C_HELPERS_H
+
+#include <libperfstat.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <utmpx.h>
+#include <time.h>
+#include <sys/mntctl.h>
+#include <sys/statfs.h>
+#include <sys/vmount.h>
+
+#define GETFUNC(TYPE) perfstat_##TYPE##_t *get_##TYPE##_stat(perfstat_##TYPE##_t *b, int n) { \
+ if (!b) return NULL; \
+ return &(b[n]); \
+}
+
+#define GETFUNC_EXT(TYPE) extern perfstat_##TYPE##_t *get_##TYPE##_stat(perfstat_##TYPE##_t *, int);
+
+GETFUNC_EXT(cpu)
+GETFUNC_EXT(disk)
+GETFUNC_EXT(diskadapter)
+GETFUNC_EXT(diskpath)
+GETFUNC_EXT(fcstat)
+GETFUNC_EXT(logicalvolume)
+GETFUNC_EXT(memory_page)
+GETFUNC_EXT(netadapter)
+GETFUNC_EXT(netbuffer)
+GETFUNC_EXT(netinterface)
+GETFUNC_EXT(pagingspace)
+GETFUNC_EXT(process)
+GETFUNC_EXT(thread)
+GETFUNC_EXT(volumegroup)
+
+struct fsinfo {
+ char *devname;
+ char *fsname;
+ int flags;
+ int fstype;
+ unsigned long totalblks;
+ unsigned long freeblks;
+ unsigned long totalinodes;
+ unsigned long freeinodes;
+};
+
+extern double get_partition_mhz(perfstat_partition_config_t);
+extern char *get_ps_hostname(perfstat_pagingspace_t *);
+extern char *get_ps_filename(perfstat_pagingspace_t *);
+extern char *get_ps_vgname(perfstat_pagingspace_t *);
+extern time_t boottime();
+struct fsinfo *get_filesystem_stat(struct fsinfo *, int);
+int get_mounts(struct vmount **);
+void fill_statfs(struct statfs, struct fsinfo *);
+int getfsinfo(char *, char *, char *, char *, int, int, struct fsinfo *);
+struct fsinfo *get_all_fs(int *);
+
+#endif
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/config.go b/test/integration/vendor/github.com/power-devops/perfstat/config.go
new file mode 100644
index 000000000..de7230d28
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/config.go
@@ -0,0 +1,18 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include <libperfstat.h>
+*/
+import "C"
+
+func EnableLVMStat() {
+ C.perfstat_config(C.PERFSTAT_ENABLE|C.PERFSTAT_LV|C.PERFSTAT_VG, nil)
+}
+
+func DisableLVMStat() {
+ C.perfstat_config(C.PERFSTAT_DISABLE|C.PERFSTAT_LV|C.PERFSTAT_VG, nil)
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/cpustat.go b/test/integration/vendor/github.com/power-devops/perfstat/cpustat.go
new file mode 100644
index 000000000..846daafba
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/cpustat.go
@@ -0,0 +1,138 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include <libperfstat.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+ "time"
+ "unsafe"
+)
+
+var old_cpu_total_stat *C.perfstat_cpu_total_t
+
+func init() {
+ old_cpu_total_stat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t))
+ C.perfstat_cpu_total(nil, old_cpu_total_stat, C.sizeof_perfstat_cpu_total_t, 1)
+}
+
+func CpuStat() ([]CPU, error) {
+ var cpustat *C.perfstat_cpu_t
+ var cpu C.perfstat_id_t
+
+ ncpu := runtime.NumCPU()
+
+ cpustat_len := C.sizeof_perfstat_cpu_t * C.ulong(ncpu)
+ cpustat = (*C.perfstat_cpu_t)(C.malloc(cpustat_len))
+ defer C.free(unsafe.Pointer(cpustat))
+ C.strcpy(&cpu.name[0], C.CString(C.FIRST_CPU))
+ r := C.perfstat_cpu(&cpu, cpustat, C.sizeof_perfstat_cpu_t, C.int(ncpu))
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu()")
+ }
+ c := make([]CPU, r)
+ for i := 0; i < int(r); i++ {
+ n := C.get_cpu_stat(cpustat, C.int(i))
+ if n != nil {
+ c[i] = perfstatcpu2cpu(n)
+ }
+ }
+ return c, nil
+}
+
+func CpuTotalStat() (*CPUTotal, error) {
+ var cpustat *C.perfstat_cpu_total_t
+
+ cpustat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t))
+ defer C.free(unsafe.Pointer(cpustat))
+ r := C.perfstat_cpu_total(nil, cpustat, C.sizeof_perfstat_cpu_total_t, 1)
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu_total()")
+ }
+ c := perfstatcputotal2cputotal(cpustat)
+ return &c, nil
+}
+
+func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) {
+ var cpuutil *C.perfstat_cpu_util_t
+ var newt *C.perfstat_cpu_total_t
+ var oldt *C.perfstat_cpu_total_t
+ var data C.perfstat_rawdata_t
+
+ oldt = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t))
+ newt = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t))
+ cpuutil = (*C.perfstat_cpu_util_t)(C.malloc(C.sizeof_perfstat_cpu_util_t))
+ defer C.free(unsafe.Pointer(oldt))
+ defer C.free(unsafe.Pointer(newt))
+ defer C.free(unsafe.Pointer(cpuutil))
+
+ r := C.perfstat_cpu_total(nil, oldt, C.sizeof_perfstat_cpu_total_t, 1)
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu_total()")
+ }
+
+ time.Sleep(intvl)
+
+ r = C.perfstat_cpu_total(nil, newt, C.sizeof_perfstat_cpu_total_t, 1)
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu_total()")
+ }
+
+ data._type = C.UTIL_CPU_TOTAL
+ data.curstat = unsafe.Pointer(newt)
+ data.prevstat = unsafe.Pointer(oldt)
+ data.sizeof_data = C.sizeof_perfstat_cpu_total_t
+ data.cur_elems = 1
+ data.prev_elems = 1
+
+ r = C.perfstat_cpu_util(&data, cpuutil, C.sizeof_perfstat_cpu_util_t, 1)
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu_util()")
+ }
+ u := perfstatcpuutil2cpuutil(cpuutil)
+ return &u, nil
+}
+
+func CpuUtilTotalStat() (*CPUUtil, error) {
+ var cpuutil *C.perfstat_cpu_util_t
+ var new_cpu_total_stat *C.perfstat_cpu_total_t
+ var data C.perfstat_rawdata_t
+
+ new_cpu_total_stat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t))
+ cpuutil = (*C.perfstat_cpu_util_t)(C.malloc(C.sizeof_perfstat_cpu_util_t))
+ defer C.free(unsafe.Pointer(cpuutil))
+
+ r := C.perfstat_cpu_total(nil, new_cpu_total_stat, C.sizeof_perfstat_cpu_total_t, 1)
+ if r <= 0 {
+ C.free(unsafe.Pointer(new_cpu_total_stat))
+ return nil, fmt.Errorf("error perfstat_cpu_total()")
+ }
+
+ data._type = C.UTIL_CPU_TOTAL
+ data.curstat = unsafe.Pointer(new_cpu_total_stat)
+ data.prevstat = unsafe.Pointer(old_cpu_total_stat)
+ data.sizeof_data = C.sizeof_perfstat_cpu_total_t
+ data.cur_elems = 1
+ data.prev_elems = 1
+
+ r = C.perfstat_cpu_util(&data, cpuutil, C.sizeof_perfstat_cpu_util_t, 1)
+ C.free(unsafe.Pointer(old_cpu_total_stat))
+ old_cpu_total_stat = new_cpu_total_stat
+ if r <= 0 {
+ return nil, fmt.Errorf("error perfstat_cpu_util()")
+ }
+ u := perfstatcpuutil2cpuutil(cpuutil)
+ return &u, nil
+}
+
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/diskstat.go b/test/integration/vendor/github.com/power-devops/perfstat/diskstat.go
new file mode 100644
index 000000000..fc70dfaa4
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/diskstat.go
@@ -0,0 +1,137 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include <libperfstat.h>
+#include <string.h>
+#include <stdlib.h>
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+func DiskTotalStat() (*DiskTotal, error) {
+ var disk C.perfstat_disk_total_t
+
+ rc := C.perfstat_disk_total(nil, &disk, C.sizeof_perfstat_disk_total_t, 1)
+ if rc != 1 {
+ return nil, fmt.Errorf("perfstat_disk_total() error")
+ }
+ d := perfstatdisktotal2disktotal(disk)
+ return &d, nil
+}
+
+func DiskAdapterStat() ([]DiskAdapter, error) {
+ var adapter *C.perfstat_diskadapter_t
+ var adptname C.perfstat_id_t
+
+ numadpt := C.perfstat_diskadapter(nil, nil, C.sizeof_perfstat_diskadapter_t, 0)
+ if numadpt <= 0 {
+ return nil, fmt.Errorf("perfstat_diskadapter() error")
+ }
+
+ adapter_len := C.sizeof_perfstat_diskadapter_t * C.ulong(numadpt)
+ adapter = (*C.perfstat_diskadapter_t)(C.malloc(adapter_len))
+ defer C.free(unsafe.Pointer(adapter))
+ C.strcpy(&adptname.name[0], C.CString(C.FIRST_DISKADAPTER))
+ r := C.perfstat_diskadapter(&adptname, adapter, C.sizeof_perfstat_diskadapter_t, numadpt)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_diskadapter() error")
+ }
+ da := make([]DiskAdapter, r)
+ for i := 0; i < int(r); i++ {
+ d := C.get_diskadapter_stat(adapter, C.int(i))
+ if d != nil {
+ da[i] = perfstatdiskadapter2diskadapter(d)
+ }
+ }
+ return da, nil
+}
+
+func DiskStat() ([]Disk, error) {
+ var disk *C.perfstat_disk_t
+ var diskname C.perfstat_id_t
+
+ numdisk := C.perfstat_disk(nil, nil, C.sizeof_perfstat_disk_t, 0)
+ if numdisk <= 0 {
+ return nil, fmt.Errorf("perfstat_disk() error")
+ }
+
+ disk_len := C.sizeof_perfstat_disk_t * C.ulong(numdisk)
+ disk = (*C.perfstat_disk_t)(C.malloc(disk_len))
+ defer C.free(unsafe.Pointer(disk))
+ C.strcpy(&diskname.name[0], C.CString(C.FIRST_DISK))
+ r := C.perfstat_disk(&diskname, disk, C.sizeof_perfstat_disk_t, numdisk)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_disk() error")
+ }
+ d := make([]Disk, r)
+ for i := 0; i < int(r); i++ {
+ ds := C.get_disk_stat(disk, C.int(i))
+ if ds != nil {
+ d[i] = perfstatdisk2disk(ds)
+ }
+ }
+ return d, nil
+}
+
+func DiskPathStat() ([]DiskPath, error) {
+ var diskpath *C.perfstat_diskpath_t
+ var pathname C.perfstat_id_t
+
+ numpaths := C.perfstat_diskpath(nil, nil, C.sizeof_perfstat_diskpath_t, 0)
+ if numpaths <= 0 {
+ return nil, fmt.Errorf("perfstat_diskpath() error")
+ }
+
+ path_len := C.sizeof_perfstat_diskpath_t * C.ulong(numpaths)
+ diskpath = (*C.perfstat_diskpath_t)(C.malloc(path_len))
+ defer C.free(unsafe.Pointer(diskpath))
+ C.strcpy(&pathname.name[0], C.CString(C.FIRST_DISKPATH))
+ r := C.perfstat_diskpath(&pathname, diskpath, C.sizeof_perfstat_diskpath_t, numpaths)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_diskpath() error")
+ }
+ d := make([]DiskPath, r)
+ for i := 0; i < int(r); i++ {
+ p := C.get_diskpath_stat(diskpath, C.int(i))
+ if p != nil {
+ d[i] = perfstatdiskpath2diskpath(p)
+ }
+ }
+ return d, nil
+}
+
+func FCAdapterStat() ([]FCAdapter, error) {
+ var fcstat *C.perfstat_fcstat_t
+ var fcname C.perfstat_id_t
+
+ numadpt := C.perfstat_fcstat(nil, nil, C.sizeof_perfstat_fcstat_t, 0)
+ if numadpt <= 0 {
+ return nil, fmt.Errorf("perfstat_fcstat() error")
+ }
+
+ fcstat_len := C.sizeof_perfstat_fcstat_t * C.ulong(numadpt)
+ fcstat = (*C.perfstat_fcstat_t)(C.malloc(fcstat_len))
+ defer C.free(unsafe.Pointer(fcstat))
+ C.strcpy(&fcname.name[0], C.CString(C.FIRST_NETINTERFACE))
+ r := C.perfstat_fcstat(&fcname, fcstat, C.sizeof_perfstat_fcstat_t, numadpt)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_fcstat() error")
+ }
+ fca := make([]FCAdapter, r)
+ for i := 0; i < int(r); i++ {
+ f := C.get_fcstat_stat(fcstat, C.int(i))
+ if f != nil {
+ fca[i] = perfstatfcstat2fcadapter(f)
+ }
+ }
+ return fca, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/doc.go b/test/integration/vendor/github.com/power-devops/perfstat/doc.go
new file mode 100644
index 000000000..85eaf3e7e
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/doc.go
@@ -0,0 +1,315 @@
+// +build !aix
+
+// Copyright 2020 Power-Devops.com. All rights reserved.
+// Use of this source code is governed by the license
+// that can be found in the LICENSE file.
+/*
+Package perfstat is Go interface to IBM AIX libperfstat.
+To use it you need AIX with installed bos.perf.libperfstat. You can check, if is installed using the following command:
+
+ $ lslpp -L bos.perf.perfstat
+
+The package is written using Go 1.14.7 and AIX 7.2 TL5. It should work with earlier TLs of AIX 7.2, but I
+can't guarantee that perfstat structures in the TLs have all the same fields as the structures in AIX 7.2 TL5.
+
+For documentation of perfstat on AIX and using it in programs refer to the official IBM documentation:
+https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat.html
+*/
+package perfstat
+
+import (
+ "fmt"
+ "time"
+)
+
+// EnableLVMStat() switches on LVM (logical volumes and volume groups) performance statistics.
+// With this enabled you can use fields KBReads, KBWrites, and IOCnt
+// in LogicalVolume and VolumeGroup data types.
+func EnableLVMStat() {}
+
+// DisableLVMStat() switchess of LVM (logical volumes and volume groups) performance statistics.
+// This is the default state. In this case LogicalVolume and VolumeGroup data types are
+// populated with informations about LVM structures, but performance statistics fields
+// (KBReads, KBWrites, IOCnt) are empty.
+func DisableLVMStat() {}
+
+// CpuStat() returns array of CPU structures with information about
+// logical CPUs on the system.
+// IBM documentation:
+// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html
+// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html
+func CpuStat() ([]CPU, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+// CpuTotalStat() returns general information about CPUs on the system.
+// IBM documentation:
+// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html
+// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html
+func CpuTotalStat() (*CPUTotal, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+// CpuUtilStat() calculates CPU utilization.
+// IBM documentation:
+// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html
+// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html
+func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func DiskTotalStat() (*DiskTotal, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func DiskAdapterStat() ([]DiskAdapter, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func DiskStat() ([]Disk, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func DiskPathStat() ([]DiskPath, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func FCAdapterStat() ([]FCAdapter, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func PartitionStat() (*PartitionConfig, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func LogicalVolumeStat() ([]LogicalVolume, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func VolumeGroupStat() ([]VolumeGroup, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func MemoryTotalStat() (*MemoryTotal, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func MemoryPageStat() ([]MemoryPage, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func PagingSpaceStat() ([]PagingSpace, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func NetIfaceTotalStat() (*NetIfaceTotal, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func NetBufferStat() ([]NetBuffer, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func NetIfaceStat() ([]NetIface, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func NetAdapterStat() ([]NetAdapter, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func ProcessStat() ([]Process, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func ThreadStat() ([]Thread, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func Sysconf(name int32) (int64, error) {
+ return 0, fmt.Errorf("not implemented")
+}
+
+func GetCPUImplementation() string {
+ return ""
+}
+
+func POWER9OrNewer() bool {
+ return false
+}
+
+func POWER9() bool {
+ return false
+}
+
+func POWER8OrNewer() bool {
+ return false
+}
+
+func POWER8() bool {
+ return false
+}
+
+func POWER7OrNewer() bool {
+ return false
+}
+
+func POWER7() bool {
+ return false
+}
+
+func HasTransactionalMemory() bool {
+ return false
+}
+
+func Is64Bit() bool {
+ return false
+}
+
+func IsSMP() bool {
+ return false
+}
+
+func HasVMX() bool {
+ return false
+}
+
+func HasVSX() bool {
+ return false
+}
+
+func HasDFP() bool {
+ return false
+}
+
+func HasNxGzip() bool {
+ return false
+}
+
+func PksCapable() bool {
+ return false
+}
+
+func PksEnabled() bool {
+ return false
+}
+
+func CPUMode() string {
+ return ""
+}
+
+func KernelBits() int {
+ return 0
+}
+
+func IsLPAR() bool {
+ return false
+}
+
+func CpuAddCapable() bool {
+ return false
+}
+
+func CpuRemoveCapable() bool {
+ return false
+}
+
+func MemoryAddCapable() bool {
+ return false
+}
+
+func MemoryRemoveCapable() bool {
+ return false
+}
+
+func DLparCapable() bool {
+ return false
+}
+
+func IsNUMA() bool {
+ return false
+}
+
+func KernelKeys() bool {
+ return false
+}
+
+func RecoveryMode() bool {
+ return false
+}
+
+func EnhancedAffinity() bool {
+ return false
+}
+
+func VTpmEnabled() bool {
+ return false
+}
+
+func IsVIOS() bool {
+ return false
+}
+
+func MLSEnabled() bool {
+ return false
+}
+
+func SPLparCapable() bool {
+ return false
+}
+
+func SPLparEnabled() bool {
+ return false
+}
+
+func DedicatedLpar() bool {
+ return false
+}
+
+func SPLparCapped() bool {
+ return false
+}
+
+func SPLparDonating() bool {
+ return false
+}
+
+func SmtCapable() bool {
+ return false
+}
+
+func SmtEnabled() bool {
+ return false
+}
+
+func VrmCapable() bool {
+ return false
+}
+
+func VrmEnabled() bool {
+ return false
+}
+
+func AmeEnabled() bool {
+ return false
+}
+
+func EcoCapable() bool {
+ return false
+}
+
+func EcoEnabled() bool {
+ return false
+}
+
+func BootTime() (uint64, error) {
+ return 0, fmt.Errorf("Not implemented")
+}
+
+func UptimeSeconds() (uint64, error) {
+ return 0, fmt.Errorf("Not implemented")
+}
+
+func FileSystemStat() ([]FileSystem, error) {
+ return nil, fmt.Errorf("Not implemented")
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/fsstat.go b/test/integration/vendor/github.com/power-devops/perfstat/fsstat.go
new file mode 100644
index 000000000..27f4c06c1
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/fsstat.go
@@ -0,0 +1,31 @@
+// +build aix
+
+package perfstat
+
+/*
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func FileSystemStat() ([]FileSystem, error) {
+ var fsinfo *C.struct_fsinfo
+ var nmounts C.int
+
+ fsinfo = C.get_all_fs(&nmounts)
+ if nmounts <= 0 {
+ return nil, fmt.Errorf("No mounts found")
+ }
+
+ fs := make([]FileSystem, nmounts)
+ for i := 0; i < int(nmounts); i++ {
+ f := C.get_filesystem_stat(fsinfo, C.int(i))
+ if f != nil {
+ fs[i] = fsinfo2filesystem(f)
+ }
+ }
+ return fs, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/helpers.go b/test/integration/vendor/github.com/power-devops/perfstat/helpers.go
new file mode 100644
index 000000000..1b13eb561
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/helpers.go
@@ -0,0 +1,764 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+#include
+
+#include "c_helpers.h"
+*/
+import "C"
+
+func perfstatcpu2cpu(n *C.perfstat_cpu_t) CPU {
+ var c CPU
+ c.Name = C.GoString(&n.name[0])
+ c.User = int64(n.user)
+ c.Sys = int64(n.sys)
+ c.Idle = int64(n.idle)
+ c.Wait = int64(n.wait)
+ c.PSwitch = int64(n.pswitch)
+ c.Syscall = int64(n.syscall)
+ c.Sysread = int64(n.sysread)
+ c.Syswrite = int64(n.syswrite)
+ c.Sysfork = int64(n.sysfork)
+ c.Sysexec = int64(n.sysexec)
+ c.Readch = int64(n.readch)
+ c.Writech = int64(n.writech)
+ c.Bread = int64(n.bread)
+ c.Bwrite = int64(n.bwrite)
+ c.Lread = int64(n.lread)
+ c.Lwrite = int64(n.lwrite)
+ c.Phread = int64(n.phread)
+ c.Phwrite = int64(n.phwrite)
+ c.Iget = int64(n.iget)
+ c.Namei = int64(n.namei)
+ c.Dirblk = int64(n.dirblk)
+ c.Msg = int64(n.msg)
+ c.Sema = int64(n.sema)
+ c.MinFaults = int64(n.minfaults)
+ c.MajFaults = int64(n.majfaults)
+ c.PUser = int64(n.puser)
+ c.PSys = int64(n.psys)
+ c.PIdle = int64(n.pidle)
+ c.PWait = int64(n.pwait)
+ c.RedispSD0 = int64(n.redisp_sd0)
+ c.RedispSD1 = int64(n.redisp_sd1)
+ c.RedispSD2 = int64(n.redisp_sd2)
+ c.RedispSD3 = int64(n.redisp_sd3)
+ c.RedispSD4 = int64(n.redisp_sd4)
+ c.RedispSD5 = int64(n.redisp_sd5)
+ c.MigrationPush = int64(n.migration_push)
+ c.MigrationS3grq = int64(n.migration_S3grq)
+ c.MigrationS3pul = int64(n.migration_S3pul)
+ c.InvolCSwitch = int64(n.invol_cswitch)
+ c.VolCSwitch = int64(n.vol_cswitch)
+ c.RunQueue = int64(n.runque)
+ c.Bound = int64(n.bound)
+ c.DecrIntrs = int64(n.decrintrs)
+ c.MpcRIntrs = int64(n.mpcrintrs)
+ c.MpcSIntrs = int64(n.mpcsintrs)
+ c.SoftIntrs = int64(n.softintrs)
+ c.DevIntrs = int64(n.devintrs)
+ c.PhantIntrs = int64(n.phantintrs)
+ c.IdleDonatedPurr = int64(n.idle_donated_purr)
+ c.IdleDonatedSpurr = int64(n.idle_donated_spurr)
+ c.BusyDonatedPurr = int64(n.busy_donated_purr)
+ c.BusyDonatedSpurr = int64(n.busy_donated_spurr)
+ c.IdleStolenPurr = int64(n.idle_stolen_purr)
+ c.IdleStolenSpurr = int64(n.idle_stolen_spurr)
+ c.BusyStolenPurr = int64(n.busy_stolen_purr)
+ c.BusyStolenSpurr = int64(n.busy_stolen_spurr)
+ c.Hpi = int64(n.hpi)
+ c.Hpit = int64(n.hpit)
+ c.PUserSpurr = int64(n.puser_spurr)
+ c.PSysSpurr = int64(n.psys_spurr)
+ c.PIdleSpurr = int64(n.pidle_spurr)
+ c.PWaitSpurr = int64(n.pwait_spurr)
+ c.SpurrFlag = int32(n.spurrflag)
+ c.LocalDispatch = int64(n.localdispatch)
+ c.NearDispatch = int64(n.neardispatch)
+ c.FarDispatch = int64(n.fardispatch)
+ c.CSwitches = int64(n.cswitches)
+ c.Version = int64(n.version)
+ c.TbLast = int64(n.tb_last)
+ c.State = int(n.state)
+ c.VtbLast = int64(n.vtb_last)
+ c.ICountLast = int64(n.icount_last)
+ return c
+}
+
+func perfstatcputotal2cputotal(n *C.perfstat_cpu_total_t) CPUTotal {
+ var c CPUTotal
+ c.NCpus = int(n.ncpus)
+ c.NCpusCfg = int(n.ncpus_cfg)
+ c.Description = C.GoString(&n.description[0])
+ c.ProcessorHz = int64(n.processorHZ)
+ c.User = int64(n.user)
+ c.Sys = int64(n.sys)
+ c.Idle = int64(n.idle)
+ c.Wait = int64(n.wait)
+ c.PSwitch = int64(n.pswitch)
+ c.Syscall = int64(n.syscall)
+ c.Sysread = int64(n.sysread)
+ c.Syswrite = int64(n.syswrite)
+ c.Sysfork = int64(n.sysfork)
+ c.Sysexec = int64(n.sysexec)
+ c.Readch = int64(n.readch)
+ c.Writech = int64(n.writech)
+ c.DevIntrs = int64(n.devintrs)
+ c.SoftIntrs = int64(n.softintrs)
+ c.Lbolt = int64(n.lbolt)
+ c.LoadAvg1 = (float32(n.loadavg[0]) / (1 << C.SBITS))
+ c.LoadAvg5 = (float32(n.loadavg[1]) / (1 << C.SBITS))
+ c.LoadAvg15 = (float32(n.loadavg[2]) / (1 << C.SBITS))
+ c.RunQueue = int64(n.runque)
+ c.SwpQueue = int64(n.swpque)
+ c.Bread = int64(n.bread)
+ c.Bwrite = int64(n.bwrite)
+ c.Lread = int64(n.lread)
+ c.Lwrite = int64(n.lwrite)
+ c.Phread = int64(n.phread)
+ c.Phwrite = int64(n.phwrite)
+ c.RunOcc = int64(n.runocc)
+ c.SwpOcc = int64(n.swpocc)
+ c.Iget = int64(n.iget)
+ c.Namei = int64(n.namei)
+ c.Dirblk = int64(n.dirblk)
+ c.Msg = int64(n.msg)
+ c.Sema = int64(n.sema)
+ c.RcvInt = int64(n.rcvint)
+ c.XmtInt = int64(n.xmtint)
+ c.MdmInt = int64(n.mdmint)
+ c.TtyRawInch = int64(n.tty_rawinch)
+ c.TtyCanInch = int64(n.tty_caninch)
+ c.TtyRawOutch = int64(n.tty_rawoutch)
+ c.Ksched = int64(n.ksched)
+ c.Koverf = int64(n.koverf)
+ c.Kexit = int64(n.kexit)
+ c.Rbread = int64(n.rbread)
+ c.Rcread = int64(n.rcread)
+ c.Rbwrt = int64(n.rbwrt)
+ c.Rcwrt = int64(n.rcwrt)
+ c.Traps = int64(n.traps)
+ c.NCpusHigh = int64(n.ncpus_high)
+ c.PUser = int64(n.puser)
+ c.PSys = int64(n.psys)
+ c.PIdle = int64(n.pidle)
+ c.PWait = int64(n.pwait)
+ c.DecrIntrs = int64(n.decrintrs)
+ c.MpcRIntrs = int64(n.mpcrintrs)
+ c.MpcSIntrs = int64(n.mpcsintrs)
+ c.PhantIntrs = int64(n.phantintrs)
+ c.IdleDonatedPurr = int64(n.idle_donated_purr)
+ c.IdleDonatedSpurr = int64(n.idle_donated_spurr)
+ c.BusyDonatedPurr = int64(n.busy_donated_purr)
+ c.BusyDonatedSpurr = int64(n.busy_donated_spurr)
+ c.IdleStolenPurr = int64(n.idle_stolen_purr)
+ c.IdleStolenSpurr = int64(n.idle_stolen_spurr)
+ c.BusyStolenPurr = int64(n.busy_stolen_purr)
+ c.BusyStolenSpurr = int64(n.busy_stolen_spurr)
+ c.IOWait = int32(n.iowait)
+ c.PhysIO = int32(n.physio)
+ c.TWait = int64(n.twait)
+ c.Hpi = int64(n.hpi)
+ c.Hpit = int64(n.hpit)
+ c.PUserSpurr = int64(n.puser_spurr)
+ c.PSysSpurr = int64(n.psys_spurr)
+ c.PIdleSpurr = int64(n.pidle_spurr)
+ c.PWaitSpurr = int64(n.pwait_spurr)
+ c.SpurrFlag = int(n.spurrflag)
+ c.Version = int64(n.version)
+ c.TbLast = int64(n.tb_last)
+ c.PurrCoalescing = int64(n.purr_coalescing)
+ c.SpurrCoalescing = int64(n.spurr_coalescing)
+ return c
+}
+
+func perfstatcpuutil2cpuutil(n *C.perfstat_cpu_util_t) CPUUtil {
+ var c CPUUtil
+
+ c.Version = int64(n.version)
+ c.CpuID = C.GoString(&n.cpu_id[0])
+ c.Entitlement = float32(n.entitlement)
+ c.UserPct = float32(n.user_pct)
+ c.KernPct = float32(n.kern_pct)
+ c.IdlePct = float32(n.idle_pct)
+ c.WaitPct = float32(n.wait_pct)
+ c.PhysicalBusy = float32(n.physical_busy)
+ c.PhysicalConsumed = float32(n.physical_consumed)
+ c.FreqPct = float32(n.freq_pct)
+ c.EntitlementPct = float32(n.entitlement_pct)
+ c.BusyPct = float32(n.busy_pct)
+ c.IdleDonatedPct = float32(n.idle_donated_pct)
+ c.BusyDonatedPct = float32(n.busy_donated_pct)
+ c.IdleStolenPct = float32(n.idle_stolen_pct)
+ c.BusyStolenPct = float32(n.busy_stolen_pct)
+ c.LUserPct = float32(n.l_user_pct)
+ c.LKernPct = float32(n.l_kern_pct)
+ c.LIdlePct = float32(n.l_idle_pct)
+ c.LWaitPct = float32(n.l_wait_pct)
+ c.DeltaTime = int64(n.delta_time)
+
+ return c
+}
+
+func perfstatdisktotal2disktotal(n C.perfstat_disk_total_t) DiskTotal {
+ var d DiskTotal
+
+ d.Number = int32(n.number)
+ d.Size = int64(n.size)
+ d.Free = int64(n.free)
+ d.XRate = int64(n.xrate)
+ d.Xfers = int64(n.xfers)
+ d.Wblks = int64(n.wblks)
+ d.Rblks = int64(n.rblks)
+ d.Time = int64(n.time)
+ d.Version = int64(n.version)
+ d.Rserv = int64(n.rserv)
+ d.MinRserv = int64(n.min_rserv)
+ d.MaxRserv = int64(n.max_rserv)
+ d.RTimeOut = int64(n.rtimeout)
+ d.RFailed = int64(n.rfailed)
+ d.Wserv = int64(n.wserv)
+ d.MinWserv = int64(n.min_wserv)
+ d.MaxWserv = int64(n.max_wserv)
+ d.WTimeOut = int64(n.wtimeout)
+ d.WFailed = int64(n.wfailed)
+ d.WqDepth = int64(n.wq_depth)
+ d.WqTime = int64(n.wq_time)
+ d.WqMinTime = int64(n.wq_min_time)
+ d.WqMaxTime = int64(n.wq_max_time)
+
+ return d
+}
+
+func perfstatdiskadapter2diskadapter(n *C.perfstat_diskadapter_t) DiskAdapter {
+ var d DiskAdapter
+
+ d.Name = C.GoString(&n.name[0])
+ d.Description = C.GoString(&n.description[0])
+ d.Number = int32(n.number)
+ d.Size = int64(n.size)
+ d.Free = int64(n.free)
+ d.XRate = int64(n.xrate)
+ d.Xfers = int64(n.xfers)
+ d.Rblks = int64(n.rblks)
+ d.Wblks = int64(n.wblks)
+ d.Time = int64(n.time)
+ d.Version = int64(n.version)
+ d.AdapterType = int64(n.adapter_type)
+ d.DkBSize = int64(n.dk_bsize)
+ d.DkRserv = int64(n.dk_rserv)
+ d.DkWserv = int64(n.dk_wserv)
+ d.MinRserv = int64(n.min_rserv)
+ d.MaxRserv = int64(n.max_rserv)
+ d.MinWserv = int64(n.min_wserv)
+ d.MaxWserv = int64(n.max_wserv)
+ d.WqDepth = int64(n.wq_depth)
+ d.WqSampled = int64(n.wq_sampled)
+ d.WqTime = int64(n.wq_time)
+ d.WqMinTime = int64(n.wq_min_time)
+ d.WqMaxTime = int64(n.wq_max_time)
+ d.QFull = int64(n.q_full)
+ d.QSampled = int64(n.q_sampled)
+
+ return d
+}
+
+func perfstatpartitionconfig2partitionconfig(n C.perfstat_partition_config_t) PartitionConfig {
+ var p PartitionConfig
+ p.Version = int64(n.version)
+ p.Name = C.GoString(&n.partitionname[0])
+ p.Node = C.GoString(&n.nodename[0])
+ p.Conf.SmtCapable = (n.conf[0] & (1 << 7)) > 0
+ p.Conf.SmtEnabled = (n.conf[0] & (1 << 6)) > 0
+ p.Conf.LparCapable = (n.conf[0] & (1 << 5)) > 0
+ p.Conf.LparEnabled = (n.conf[0] & (1 << 4)) > 0
+ p.Conf.SharedCapable = (n.conf[0] & (1 << 3)) > 0
+ p.Conf.SharedEnabled = (n.conf[0] & (1 << 2)) > 0
+ p.Conf.DLparCapable = (n.conf[0] & (1 << 1)) > 0
+ p.Conf.Capped = (n.conf[0] & (1 << 0)) > 0
+ p.Conf.Kernel64bit = (n.conf[1] & (1 << 7)) > 0
+ p.Conf.PoolUtilAuthority = (n.conf[1] & (1 << 6)) > 0
+ p.Conf.DonateCapable = (n.conf[1] & (1 << 5)) > 0
+ p.Conf.DonateEnabled = (n.conf[1] & (1 << 4)) > 0
+ p.Conf.AmsCapable = (n.conf[1] & (1 << 3)) > 0
+ p.Conf.AmsEnabled = (n.conf[1] & (1 << 2)) > 0
+ p.Conf.PowerSave = (n.conf[1] & (1 << 1)) > 0
+ p.Conf.AmeEnabled = (n.conf[1] & (1 << 0)) > 0
+ p.Conf.SharedExtended = (n.conf[2] & (1 << 7)) > 0
+ p.Number = int32(n.partitionnum)
+ p.GroupID = int32(n.groupid)
+ p.ProcessorFamily = C.GoString(&n.processorFamily[0])
+ p.ProcessorModel = C.GoString(&n.processorModel[0])
+ p.MachineID = C.GoString(&n.machineID[0])
+ p.ProcessorMhz = float64(C.get_partition_mhz(n))
+ p.NumProcessors.Online = int64(n.numProcessors.online)
+ p.NumProcessors.Max = int64(n.numProcessors.max)
+ p.NumProcessors.Min = int64(n.numProcessors.min)
+ p.NumProcessors.Desired = int64(n.numProcessors.desired)
+ p.OSName = C.GoString(&n.OSName[0])
+ p.OSVersion = C.GoString(&n.OSVersion[0])
+ p.OSBuild = C.GoString(&n.OSBuild[0])
+ p.LCpus = int32(n.lcpus)
+ p.SmtThreads = int32(n.smtthreads)
+ p.Drives = int32(n.drives)
+ p.NetworkAdapters = int32(n.nw_adapters)
+ p.CpuCap.Online = int64(n.cpucap.online)
+ p.CpuCap.Max = int64(n.cpucap.max)
+ p.CpuCap.Min = int64(n.cpucap.min)
+ p.CpuCap.Desired = int64(n.cpucap.desired)
+ p.Weightage = int32(n.cpucap_weightage)
+ p.EntCapacity = int32(n.entitled_proc_capacity)
+ p.VCpus.Online = int64(n.vcpus.online)
+ p.VCpus.Max = int64(n.vcpus.max)
+ p.VCpus.Min = int64(n.vcpus.min)
+ p.VCpus.Desired = int64(n.vcpus.desired)
+ p.PoolID = int32(n.processor_poolid)
+ p.ActiveCpusInPool = int32(n.activecpusinpool)
+ p.PoolWeightage = int32(n.cpupool_weightage)
+ p.SharedPCpu = int32(n.sharedpcpu)
+ p.MaxPoolCap = int32(n.maxpoolcap)
+ p.EntPoolCap = int32(n.entpoolcap)
+ p.Mem.Online = int64(n.mem.online)
+ p.Mem.Max = int64(n.mem.max)
+ p.Mem.Min = int64(n.mem.min)
+ p.Mem.Desired = int64(n.mem.desired)
+ p.MemWeightage = int32(n.mem_weightage)
+ p.TotalIOMemoryEntitlement = int64(n.totiomement)
+ p.MemPoolID = int32(n.mempoolid)
+ p.HyperPgSize = int64(n.hyperpgsize)
+ p.ExpMem.Online = int64(n.exp_mem.online)
+ p.ExpMem.Max = int64(n.exp_mem.max)
+ p.ExpMem.Min = int64(n.exp_mem.min)
+ p.ExpMem.Desired = int64(n.exp_mem.desired)
+ p.TargetMemExpFactor = int64(n.targetmemexpfactor)
+ p.TargetMemExpSize = int64(n.targetmemexpsize)
+ p.SubProcessorMode = int32(n.subprocessor_mode)
+ return p
+}
+
+func perfstatmemorytotal2memorytotal(n C.perfstat_memory_total_t) MemoryTotal {
+ var m MemoryTotal
+ m.VirtualTotal = int64(n.virt_total)
+ m.RealTotal = int64(n.real_total)
+ m.RealFree = int64(n.real_free)
+ m.RealPinned = int64(n.real_pinned)
+ m.RealInUse = int64(n.real_inuse)
+ m.BadPages = int64(n.pgbad)
+ m.PageFaults = int64(n.pgexct)
+ m.PageIn = int64(n.pgins)
+ m.PageOut = int64(n.pgouts)
+ m.PgSpIn = int64(n.pgspins)
+ m.PgSpOut = int64(n.pgspouts)
+ m.Scans = int64(n.scans)
+ m.Cycles = int64(n.cycles)
+ m.PgSteals = int64(n.pgsteals)
+ m.NumPerm = int64(n.numperm)
+ m.PgSpTotal = int64(n.pgsp_total)
+ m.PgSpFree = int64(n.pgsp_free)
+ m.PgSpRsvd = int64(n.pgsp_rsvd)
+ m.RealSystem = int64(n.real_system)
+ m.RealUser = int64(n.real_user)
+ m.RealProcess = int64(n.real_process)
+ m.VirtualActive = int64(n.virt_active)
+ m.IOME = int64(n.iome)
+ m.IOMU = int64(n.iomu)
+ m.IOHWM = int64(n.iohwm)
+ m.PMem = int64(n.pmem)
+ m.CompressedTotal = int64(n.comprsd_total)
+ m.CompressedWSegPg = int64(n.comprsd_wseg_pgs)
+ m.CPgIn = int64(n.cpgins)
+ m.CPgOut = int64(n.cpgouts)
+ m.TrueSize = int64(n.true_size)
+ m.ExpandedMemory = int64(n.expanded_memory)
+ m.CompressedWSegSize = int64(n.comprsd_wseg_size)
+ m.TargetCPoolSize = int64(n.target_cpool_size)
+ m.MaxCPoolSize = int64(n.max_cpool_size)
+ m.MinUCPoolSize = int64(n.min_ucpool_size)
+ m.CPoolSize = int64(n.cpool_size)
+ m.UCPoolSize = int64(n.ucpool_size)
+ m.CPoolInUse = int64(n.cpool_inuse)
+ m.UCPoolInUse = int64(n.ucpool_inuse)
+ m.Version = int64(n.version)
+ m.RealAvailable = int64(n.real_avail)
+ m.BytesCoalesced = int64(n.bytes_coalesced)
+ m.BytesCoalescedMemPool = int64(n.bytes_coalesced_mempool)
+
+ return m
+}
+
+func perfstatnetinterfacetotal2netifacetotal(n C.perfstat_netinterface_total_t) NetIfaceTotal {
+ var i NetIfaceTotal
+
+ i.Number = int32(n.number)
+ i.IPackets = int64(n.ipackets)
+ i.IBytes = int64(n.ibytes)
+ i.IErrors = int64(n.ierrors)
+ i.OPackets = int64(n.opackets)
+ i.OBytes = int64(n.obytes)
+ i.OErrors = int64(n.oerrors)
+ i.Collisions = int64(n.collisions)
+ i.XmitDrops = int64(n.xmitdrops)
+ i.Version = int64(n.version)
+
+ return i
+}
+
+func perfstatdisk2disk(n *C.perfstat_disk_t) Disk {
+ var d Disk
+
+ d.Name = C.GoString(&n.name[0])
+ d.Description = C.GoString(&n.description[0])
+ d.VGName = C.GoString(&n.vgname[0])
+ d.Size = int64(n.size)
+ d.Free = int64(n.free)
+ d.BSize = int64(n.bsize)
+ d.XRate = int64(n.xrate)
+ d.Xfers = int64(n.xfers)
+ d.Wblks = int64(n.wblks)
+ d.Rblks = int64(n.rblks)
+ d.QDepth = int64(n.qdepth)
+ d.Time = int64(n.time)
+ d.Adapter = C.GoString(&n.adapter[0])
+ d.PathsCount = int32(n.paths_count)
+ d.QFull = int64(n.q_full)
+ d.Rserv = int64(n.rserv)
+ d.RTimeOut = int64(n.rtimeout)
+ d.Rfailed = int64(n.rfailed)
+ d.MinRserv = int64(n.min_rserv)
+ d.MaxRserv = int64(n.max_rserv)
+ d.Wserv = int64(n.wserv)
+ d.WTimeOut = int64(n.wtimeout)
+ d.Wfailed = int64(n.wfailed)
+ d.MinWserv = int64(n.min_wserv)
+ d.MaxWserv = int64(n.max_wserv)
+ d.WqDepth = int64(n.wq_depth)
+ d.WqSampled = int64(n.wq_sampled)
+ d.WqTime = int64(n.wq_time)
+ d.WqMinTime = int64(n.wq_min_time)
+ d.WqMaxTime = int64(n.wq_max_time)
+ d.QSampled = int64(n.q_sampled)
+ d.Version = int64(n.version)
+ d.PseudoDisk = (n.dk_type[0] & (1 << 7)) > 0
+ d.VTDisk = (n.dk_type[0] & (1 << 6)) > 0
+
+ return d
+}
+
+func perfstatdiskpath2diskpath(n *C.perfstat_diskpath_t) DiskPath {
+ var d DiskPath
+
+ d.Name = C.GoString(&n.name[0])
+ d.XRate = int64(n.xrate)
+ d.Xfers = int64(n.xfers)
+ d.Rblks = int64(n.rblks)
+ d.Wblks = int64(n.wblks)
+ d.Time = int64(n.time)
+ d.Adapter = C.GoString(&n.adapter[0])
+ d.QFull = int64(n.q_full)
+ d.Rserv = int64(n.rserv)
+ d.RTimeOut = int64(n.rtimeout)
+ d.Rfailed = int64(n.rfailed)
+ d.MinRserv = int64(n.min_rserv)
+ d.MaxRserv = int64(n.max_rserv)
+ d.Wserv = int64(n.wserv)
+ d.WTimeOut = int64(n.wtimeout)
+ d.Wfailed = int64(n.wfailed)
+ d.MinWserv = int64(n.min_wserv)
+ d.MaxWserv = int64(n.max_wserv)
+ d.WqDepth = int64(n.wq_depth)
+ d.WqSampled = int64(n.wq_sampled)
+ d.WqTime = int64(n.wq_time)
+ d.WqMinTime = int64(n.wq_min_time)
+ d.WqMaxTime = int64(n.wq_max_time)
+ d.QSampled = int64(n.q_sampled)
+ d.Version = int64(n.version)
+
+ return d
+}
+
+func perfstatfcstat2fcadapter(n *C.perfstat_fcstat_t) FCAdapter {
+ var f FCAdapter
+
+ f.Version = int64(n.version)
+ f.Name = C.GoString(&n.name[0])
+ f.State = int32(n.state)
+ f.InputRequests = int64(n.InputRequests)
+ f.OutputRequests = int64(n.OutputRequests)
+ f.InputBytes = int64(n.InputBytes)
+ f.OutputBytes = int64(n.OutputBytes)
+ f.EffMaxTransfer = int64(n.EffMaxTransfer)
+ f.NoDMAResourceCnt = int64(n.NoDMAResourceCnt)
+ f.NoCmdResourceCnt = int64(n.NoCmdResourceCnt)
+ f.AttentionType = int32(n.AttentionType)
+ f.SecondsSinceLastReset = int64(n.SecondsSinceLastReset)
+ f.TxFrames = int64(n.TxFrames)
+ f.TxWords = int64(n.TxWords)
+ f.RxFrames = int64(n.RxFrames)
+ f.RxWords = int64(n.RxWords)
+ f.LIPCount = int64(n.LIPCount)
+ f.NOSCount = int64(n.NOSCount)
+ f.ErrorFrames = int64(n.ErrorFrames)
+ f.DumpedFrames = int64(n.DumpedFrames)
+ f.LinkFailureCount = int64(n.LinkFailureCount)
+ f.LossofSyncCount = int64(n.LossofSyncCount)
+ f.LossofSignal = int64(n.LossofSignal)
+ f.PrimitiveSeqProtocolErrCount = int64(n.PrimitiveSeqProtocolErrCount)
+ f.InvalidTxWordCount = int64(n.InvalidTxWordCount)
+ f.InvalidCRCCount = int64(n.InvalidCRCCount)
+ f.PortFcId = int64(n.PortFcId)
+ f.PortSpeed = int64(n.PortSpeed)
+ f.PortType = C.GoString(&n.PortType[0])
+ f.PortWWN = int64(n.PortWWN)
+ f.PortSupportedSpeed = int64(n.PortSupportedSpeed)
+ f.AdapterType = int(n.adapter_type)
+ f.VfcName = C.GoString(&n.vfc_name[0])
+ f.ClientPartName = C.GoString(&n.client_part_name[0])
+
+ return f
+}
+
+func perfstatlogicalvolume2logicalvolume(n *C.perfstat_logicalvolume_t) LogicalVolume {
+ var l LogicalVolume
+
+ l.Name = C.GoString(&n.name[0])
+ l.VGName = C.GoString(&n.vgname[0])
+ l.OpenClose = int64(n.open_close)
+ l.State = int64(n.state)
+ l.MirrorPolicy = int64(n.mirror_policy)
+ l.MirrorWriteConsistency = int64(n.mirror_write_consistency)
+ l.WriteVerify = int64(n.write_verify)
+ l.PPsize = int64(n.ppsize)
+ l.LogicalPartitions = int64(n.logical_partitions)
+ l.Mirrors = int32(n.mirrors)
+ l.IOCnt = int64(n.iocnt)
+ l.KBReads = int64(n.kbreads)
+ l.KBWrites = int64(n.kbwrites)
+ l.Version = int64(n.version)
+
+ return l
+}
+
+func perfstatvolumegroup2volumegroup(n *C.perfstat_volumegroup_t) VolumeGroup {
+ var v VolumeGroup
+
+ v.Name = C.GoString(&n.name[0])
+ v.TotalDisks = int64(n.total_disks)
+ v.ActiveDisks = int64(n.active_disks)
+ v.TotalLogicalVolumes = int64(n.total_logical_volumes)
+ v.OpenedLogicalVolumes = int64(n.opened_logical_volumes)
+ v.IOCnt = int64(n.iocnt)
+ v.KBReads = int64(n.kbreads)
+ v.KBWrites = int64(n.kbwrites)
+ v.Version = int64(n.version)
+ v.VariedState = int(n.variedState)
+
+ return v
+}
+
+func perfstatmemorypage2memorypage(n *C.perfstat_memory_page_t) MemoryPage {
+ var m MemoryPage
+
+ m.PSize = int64(n.psize)
+ m.RealTotal = int64(n.real_total)
+ m.RealFree = int64(n.real_free)
+ m.RealPinned = int64(n.real_pinned)
+ m.RealInUse = int64(n.real_inuse)
+ m.PgExct = int64(n.pgexct)
+ m.PgIns = int64(n.pgins)
+ m.PgOuts = int64(n.pgouts)
+ m.PgSpIns = int64(n.pgspins)
+ m.PgSpOuts = int64(n.pgspouts)
+ m.Scans = int64(n.scans)
+ m.Cycles = int64(n.cycles)
+ m.PgSteals = int64(n.pgsteals)
+ m.NumPerm = int64(n.numperm)
+ m.NumPgSp = int64(n.numpgsp)
+ m.RealSystem = int64(n.real_system)
+ m.RealUser = int64(n.real_user)
+ m.RealProcess = int64(n.real_process)
+ m.VirtActive = int64(n.virt_active)
+ m.ComprsdTotal = int64(n.comprsd_total)
+ m.ComprsdWsegPgs = int64(n.comprsd_wseg_pgs)
+ m.CPgIns = int64(n.cpgins)
+ m.CPgOuts = int64(n.cpgouts)
+ m.CPoolInUse = int64(n.cpool_inuse)
+ m.UCPoolSize = int64(n.ucpool_size)
+ m.ComprsdWsegSize = int64(n.comprsd_wseg_size)
+ m.Version = int64(n.version)
+ m.RealAvail = int64(n.real_avail)
+
+ return m
+}
+
+func perfstatnetbuffer2netbuffer(n *C.perfstat_netbuffer_t) NetBuffer {
+ var b NetBuffer
+
+ b.Name = C.GoString(&n.name[0])
+ b.InUse = int64(n.inuse)
+ b.Calls = int64(n.calls)
+ b.Delayed = int64(n.delayed)
+ b.Free = int64(n.free)
+ b.Failed = int64(n.failed)
+ b.HighWatermark = int64(n.highwatermark)
+ b.Freed = int64(n.freed)
+ b.Version = int64(n.version)
+
+ return b
+}
+
+func perfstatnetinterface2netiface(n *C.perfstat_netinterface_t) NetIface {
+ var i NetIface
+
+ i.Name = C.GoString(&n.name[0])
+ i.Description = C.GoString(&n.description[0])
+ i.Type = uint8(n._type)
+ i.MTU = int64(n.mtu)
+ i.IPackets = int64(n.ipackets)
+ i.IBytes = int64(n.ibytes)
+ i.IErrors = int64(n.ierrors)
+ i.OPackets = int64(n.opackets)
+ i.OBytes = int64(n.obytes)
+ i.OErrors = int64(n.oerrors)
+ i.Collisions = int64(n.collisions)
+ i.Bitrate = int64(n.bitrate)
+ i.XmitDrops = int64(n.xmitdrops)
+ i.Version = int64(n.version)
+ i.IfIqDrops = int64(n.if_iqdrops)
+ i.IfArpDrops = int64(n.if_arpdrops)
+
+ return i
+}
+
+func perfstatnetadapter2netadapter(n *C.perfstat_netadapter_t) NetAdapter {
+ var i NetAdapter
+
+ i.Version = int64(n.version)
+ i.Name = C.GoString(&n.name[0])
+ i.TxPackets = int64(n.tx_packets)
+ i.TxBytes = int64(n.tx_bytes)
+ i.TxInterrupts = int64(n.tx_interrupts)
+ i.TxErrors = int64(n.tx_errors)
+ i.TxPacketsDropped = int64(n.tx_packets_dropped)
+ i.TxQueueSize = int64(n.tx_queue_size)
+ i.TxQueueLen = int64(n.tx_queue_len)
+ i.TxQueueOverflow = int64(n.tx_queue_overflow)
+ i.TxBroadcastPackets = int64(n.tx_broadcast_packets)
+ i.TxMulticastPackets = int64(n.tx_multicast_packets)
+ i.TxCarrierSense = int64(n.tx_carrier_sense)
+ i.TxDMAUnderrun = int64(n.tx_DMA_underrun)
+ i.TxLostCTSErrors = int64(n.tx_lost_CTS_errors)
+ i.TxMaxCollisionErrors = int64(n.tx_max_collision_errors)
+ i.TxLateCollisionErrors = int64(n.tx_late_collision_errors)
+ i.TxDeferred = int64(n.tx_deferred)
+ i.TxTimeoutErrors = int64(n.tx_timeout_errors)
+ i.TxSingleCollisionCount = int64(n.tx_single_collision_count)
+ i.TxMultipleCollisionCount = int64(n.tx_multiple_collision_count)
+ i.RxPackets = int64(n.rx_packets)
+ i.RxBytes = int64(n.rx_bytes)
+ i.RxInterrupts = int64(n.rx_interrupts)
+ i.RxErrors = int64(n.rx_errors)
+ i.RxPacketsDropped = int64(n.rx_packets_dropped)
+ i.RxBadPackets = int64(n.rx_bad_packets)
+ i.RxMulticastPackets = int64(n.rx_multicast_packets)
+ i.RxBroadcastPackets = int64(n.rx_broadcast_packets)
+ i.RxCRCErrors = int64(n.rx_CRC_errors)
+ i.RxDMAOverrun = int64(n.rx_DMA_overrun)
+ i.RxAlignmentErrors = int64(n.rx_alignment_errors)
+ i.RxNoResourceErrors = int64(n.rx_noresource_errors)
+ i.RxCollisionErrors = int64(n.rx_collision_errors)
+ i.RxPacketTooShortErrors = int64(n.rx_packet_tooshort_errors)
+ i.RxPacketTooLongErrors = int64(n.rx_packet_toolong_errors)
+ i.RxPacketDiscardedByAdapter = int64(n.rx_packets_discardedbyadapter)
+ i.AdapterType = int32(n.adapter_type)
+
+ return i
+}
+
+func perfstatpagingspace2pagingspace(n *C.perfstat_pagingspace_t) PagingSpace {
+ var i PagingSpace
+
+ i.Name = C.GoString(&n.name[0])
+ i.Type = uint8(n._type)
+ i.VGName = C.GoString(C.get_ps_vgname(n))
+ i.Hostname = C.GoString(C.get_ps_hostname(n))
+ i.Filename = C.GoString(C.get_ps_filename(n))
+ i.LPSize = int64(n.lp_size)
+ i.MBSize = int64(n.mb_size)
+ i.MBUsed = int64(n.mb_used)
+ i.IOPending = int64(n.io_pending)
+ i.Active = uint8(n.active)
+ i.Automatic = uint8(n.automatic)
+ i.Version = int64(n.version)
+
+ return i
+}
+
+func perfstatprocess2process(n *C.perfstat_process_t) Process {
+ var i Process
+
+ i.Version = int64(n.version)
+ i.PID = int64(n.pid)
+ i.ProcessName = C.GoString(&n.proc_name[0])
+ i.Priority = int32(n.proc_priority)
+ i.NumThreads = int64(n.num_threads)
+ i.UID = int64(n.proc_uid)
+ i.ClassID = int64(n.proc_classid)
+ i.Size = int64(n.proc_size)
+ i.RealMemData = int64(n.proc_real_mem_data)
+ i.RealMemText = int64(n.proc_real_mem_text)
+ i.VirtMemData = int64(n.proc_virt_mem_data)
+ i.VirtMemText = int64(n.proc_virt_mem_text)
+ i.SharedLibDataSize = int64(n.shared_lib_data_size)
+ i.HeapSize = int64(n.heap_size)
+ i.RealInUse = int64(n.real_inuse)
+ i.VirtInUse = int64(n.virt_inuse)
+ i.Pinned = int64(n.pinned)
+ i.PgSpInUse = int64(n.pgsp_inuse)
+ i.FilePages = int64(n.filepages)
+ i.RealInUseMap = int64(n.real_inuse_map)
+ i.VirtInUseMap = int64(n.virt_inuse_map)
+ i.PinnedInUseMap = int64(n.pinned_inuse_map)
+ i.UCpuTime = float64(n.ucpu_time)
+ i.SCpuTime = float64(n.scpu_time)
+ i.LastTimeBase = int64(n.last_timebase)
+ i.InBytes = int64(n.inBytes)
+ i.OutBytes = int64(n.outBytes)
+ i.InOps = int64(n.inOps)
+ i.OutOps = int64(n.outOps)
+
+ return i
+}
+
+func perfstatthread2thread(n *C.perfstat_thread_t) Thread {
+ var i Thread
+
+ i.TID = int64(n.tid)
+ i.PID = int64(n.pid)
+ i.CpuID = int64(n.cpuid)
+ i.UCpuTime = float64(n.ucpu_time)
+ i.SCpuTime = float64(n.scpu_time)
+ i.LastTimeBase = int64(n.last_timebase)
+ i.Version = int64(n.version)
+
+ return i
+}
+
+func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem {
+ var i FileSystem
+
+ i.Device = C.GoString(n.devname)
+ i.MountPoint = C.GoString(n.fsname)
+ i.FSType = int(n.fstype)
+ i.Flags = uint(n.flags)
+ i.TotalBlocks = int64(n.totalblks)
+ i.FreeBlocks = int64(n.freeblks)
+ i.TotalInodes = int64(n.totalinodes)
+ i.FreeInodes = int64(n.freeinodes)
+
+ return i
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/lparstat.go b/test/integration/vendor/github.com/power-devops/perfstat/lparstat.go
new file mode 100644
index 000000000..0ce35e3c5
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/lparstat.go
@@ -0,0 +1,26 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func PartitionStat() (*PartitionConfig, error) {
+ var part C.perfstat_partition_config_t
+
+ rc := C.perfstat_partition_config(nil, &part, C.sizeof_perfstat_partition_config_t, 1)
+ if rc != 1 {
+ return nil, fmt.Errorf("perfstat_partition_config() error")
+ }
+ p := perfstatpartitionconfig2partitionconfig(part)
+ return &p, nil
+
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/lvmstat.go b/test/integration/vendor/github.com/power-devops/perfstat/lvmstat.go
new file mode 100644
index 000000000..eb2064c80
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/lvmstat.go
@@ -0,0 +1,72 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+#include
+#include
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+func LogicalVolumeStat() ([]LogicalVolume, error) {
+ var lv *C.perfstat_logicalvolume_t
+ var lvname C.perfstat_id_t
+
+ numlvs := C.perfstat_logicalvolume(nil, nil, C.sizeof_perfstat_logicalvolume_t, 0)
+ if numlvs <= 0 {
+ return nil, fmt.Errorf("perfstat_logicalvolume() error")
+ }
+
+ lv_len := C.sizeof_perfstat_logicalvolume_t * C.ulong(numlvs)
+ lv = (*C.perfstat_logicalvolume_t)(C.malloc(lv_len))
+ defer C.free(unsafe.Pointer(lv))
+ C.strcpy(&lvname.name[0], C.CString(""))
+ r := C.perfstat_logicalvolume(&lvname, lv, C.sizeof_perfstat_logicalvolume_t, numlvs)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_logicalvolume() error")
+ }
+ lvs := make([]LogicalVolume, r)
+ for i := 0; i < int(r); i++ {
+ l := C.get_logicalvolume_stat(lv, C.int(i))
+ if l != nil {
+ lvs[i] = perfstatlogicalvolume2logicalvolume(l)
+ }
+ }
+ return lvs, nil
+}
+
+func VolumeGroupStat() ([]VolumeGroup, error) {
+ var vg *C.perfstat_volumegroup_t
+ var vgname C.perfstat_id_t
+
+ numvgs := C.perfstat_volumegroup(nil, nil, C.sizeof_perfstat_volumegroup_t, 0)
+ if numvgs <= 0 {
+ return nil, fmt.Errorf("perfstat_volumegroup() error")
+ }
+
+ vg_len := C.sizeof_perfstat_volumegroup_t * C.ulong(numvgs)
+ vg = (*C.perfstat_volumegroup_t)(C.malloc(vg_len))
+ defer C.free(unsafe.Pointer(vg))
+ C.strcpy(&vgname.name[0], C.CString(""))
+ r := C.perfstat_volumegroup(&vgname, vg, C.sizeof_perfstat_volumegroup_t, numvgs)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_volumegroup() error")
+ }
+ vgs := make([]VolumeGroup, r)
+ for i := 0; i < int(r); i++ {
+ v := C.get_volumegroup_stat(vg, C.int(i))
+ if v != nil {
+ vgs[i] = perfstatvolumegroup2volumegroup(v)
+ }
+ }
+ return vgs, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/memstat.go b/test/integration/vendor/github.com/power-devops/perfstat/memstat.go
new file mode 100644
index 000000000..d211a73aa
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/memstat.go
@@ -0,0 +1,84 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+#include
+#include
+
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+func MemoryTotalStat() (*MemoryTotal, error) {
+ var memory C.perfstat_memory_total_t
+
+ rc := C.perfstat_memory_total(nil, &memory, C.sizeof_perfstat_memory_total_t, 1)
+ if rc != 1 {
+ return nil, fmt.Errorf("perfstat_memory_total() error")
+ }
+ m := perfstatmemorytotal2memorytotal(memory)
+ return &m, nil
+}
+
+func MemoryPageStat() ([]MemoryPage, error) {
+ var mempage *C.perfstat_memory_page_t
+ var fps C.perfstat_psize_t
+
+ numps := C.perfstat_memory_page(nil, nil, C.sizeof_perfstat_memory_page_t, 0)
+ if numps < 1 {
+ return nil, fmt.Errorf("perfstat_memory_page() error")
+ }
+
+ mp_len := C.sizeof_perfstat_memory_page_t * C.ulong(numps)
+ mempage = (*C.perfstat_memory_page_t)(C.malloc(mp_len))
+ defer C.free(unsafe.Pointer(mempage))
+ fps.psize = C.FIRST_PSIZE
+ r := C.perfstat_memory_page(&fps, mempage, C.sizeof_perfstat_memory_page_t, numps)
+ if r < 1 {
+ return nil, fmt.Errorf("perfstat_memory_page() error")
+ }
+ ps := make([]MemoryPage, r)
+ for i := 0; i < int(r); i++ {
+ p := C.get_memory_page_stat(mempage, C.int(i))
+ if p != nil {
+ ps[i] = perfstatmemorypage2memorypage(p)
+ }
+ }
+ return ps, nil
+}
+
+func PagingSpaceStat() ([]PagingSpace, error) {
+ var pspace *C.perfstat_pagingspace_t
+ var fps C.perfstat_id_t
+
+ numps := C.perfstat_pagingspace(nil, nil, C.sizeof_perfstat_pagingspace_t, 0)
+ if numps <= 0 {
+ return nil, fmt.Errorf("perfstat_pagingspace() error")
+ }
+
+ ps_len := C.sizeof_perfstat_pagingspace_t * C.ulong(numps)
+ pspace = (*C.perfstat_pagingspace_t)(C.malloc(ps_len))
+ defer C.free(unsafe.Pointer(pspace))
+ C.strcpy(&fps.name[0], C.CString(C.FIRST_PAGINGSPACE))
+ r := C.perfstat_pagingspace(&fps, pspace, C.sizeof_perfstat_pagingspace_t, numps)
+ if r < 1 {
+ return nil, fmt.Errorf("perfstat_pagingspace() error")
+ }
+ ps := make([]PagingSpace, r)
+ for i := 0; i < int(r); i++ {
+ p := C.get_pagingspace_stat(pspace, C.int(i))
+ if p != nil {
+ ps[i] = perfstatpagingspace2pagingspace(p)
+ }
+ }
+ return ps, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/netstat.go b/test/integration/vendor/github.com/power-devops/perfstat/netstat.go
new file mode 100644
index 000000000..4070da211
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/netstat.go
@@ -0,0 +1,117 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+#include
+#include
+
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+func NetIfaceTotalStat() (*NetIfaceTotal, error) {
+ var nif C.perfstat_netinterface_total_t
+
+ rc := C.perfstat_netinterface_total(nil, &nif, C.sizeof_perfstat_netinterface_total_t, 1)
+ if rc != 1 {
+ return nil, fmt.Errorf("perfstat_netinterface_total() error")
+ }
+ n := perfstatnetinterfacetotal2netifacetotal(nif)
+ return &n, nil
+}
+
+func NetBufferStat() ([]NetBuffer, error) {
+ var nbuf *C.perfstat_netbuffer_t
+ var first C.perfstat_id_t
+
+ numbuf := C.perfstat_netbuffer(nil, nil, C.sizeof_perfstat_netbuffer_t, 0)
+ if numbuf < 1 {
+ return nil, fmt.Errorf("perfstat_netbuffer() error")
+ }
+
+ nblen := C.sizeof_perfstat_netbuffer_t * C.ulong(numbuf)
+ nbuf = (*C.perfstat_netbuffer_t)(C.malloc(nblen))
+ defer C.free(unsafe.Pointer(nbuf))
+ C.strcpy(&first.name[0], C.CString(C.FIRST_NETBUFFER))
+ r := C.perfstat_netbuffer(&first, nbuf, C.sizeof_perfstat_netbuffer_t, numbuf)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_netbuffer() error")
+ }
+ nb := make([]NetBuffer, r)
+ for i := 0; i < int(r); i++ {
+ b := C.get_netbuffer_stat(nbuf, C.int(i))
+ if b != nil {
+ nb[i] = perfstatnetbuffer2netbuffer(b)
+ }
+ }
+ return nb, nil
+}
+
+func NetIfaceStat() ([]NetIface, error) {
+ var nif *C.perfstat_netinterface_t
+ var first C.perfstat_id_t
+
+ numif := C.perfstat_netinterface(nil, nil, C.sizeof_perfstat_netinterface_t, 0)
+ if numif < 0 {
+ return nil, fmt.Errorf("perfstat_netinterface() error")
+ }
+ if numif == 0 {
+ return []NetIface{}, fmt.Errorf("no network interfaces found")
+ }
+
+ iflen := C.sizeof_perfstat_netinterface_t * C.ulong(numif)
+ nif = (*C.perfstat_netinterface_t)(C.malloc(iflen))
+ defer C.free(unsafe.Pointer(nif))
+ C.strcpy(&first.name[0], C.CString(C.FIRST_NETINTERFACE))
+ r := C.perfstat_netinterface(&first, nif, C.sizeof_perfstat_netinterface_t, numif)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_netinterface() error")
+ }
+ ifs := make([]NetIface, r)
+ for i := 0; i < int(r); i++ {
+ b := C.get_netinterface_stat(nif, C.int(i))
+ if b != nil {
+ ifs[i] = perfstatnetinterface2netiface(b)
+ }
+ }
+ return ifs, nil
+}
+
+func NetAdapterStat() ([]NetAdapter, error) {
+ var adapters *C.perfstat_netadapter_t
+ var first C.perfstat_id_t
+
+ numad := C.perfstat_netadapter(nil, nil, C.sizeof_perfstat_netadapter_t, 0)
+ if numad < 0 {
+ return nil, fmt.Errorf("perfstat_netadater() error")
+ }
+ if numad == 0 {
+ return []NetAdapter{}, fmt.Errorf("no network adapters found")
+ }
+
+ adplen := C.sizeof_perfstat_netadapter_t * C.ulong(numad)
+ adapters = (*C.perfstat_netadapter_t)(C.malloc(adplen))
+ defer C.free(unsafe.Pointer(adapters))
+ C.strcpy(&first.name[0], C.CString(C.FIRST_NETINTERFACE))
+ r := C.perfstat_netadapter(&first, adapters, C.sizeof_perfstat_netadapter_t, numad)
+ if r < 0 {
+ return nil, fmt.Errorf("perfstat_netadapter() error")
+ }
+ ads := make([]NetAdapter, r)
+ for i := 0; i < int(r); i++ {
+ b := C.get_netadapter_stat(adapters, C.int(i))
+ if b != nil {
+ ads[i] = perfstatnetadapter2netadapter(b)
+ }
+ }
+ return ads, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/procstat.go b/test/integration/vendor/github.com/power-devops/perfstat/procstat.go
new file mode 100644
index 000000000..ecafebd8d
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/procstat.go
@@ -0,0 +1,75 @@
+// +build aix
+
+package perfstat
+
+/*
+#cgo LDFLAGS: -lperfstat
+
+#include
+#include
+#include
+
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// ProcessStat returns statistics for all processes on the system,
+// collected via perfstat_process(3).
+func ProcessStat() ([]Process, error) {
+	var proc *C.perfstat_process_t
+	var first C.perfstat_id_t
+
+	// A NULL-buffer call only queries the number of processes.
+	numproc := C.perfstat_process(nil, nil, C.sizeof_perfstat_process_t, 0)
+	if numproc < 1 {
+		return nil, fmt.Errorf("perfstat_process() error")
+	}
+
+	// C buffer large enough for all entries; released on return.
+	plen := C.sizeof_perfstat_process_t * C.ulong(numproc)
+	proc = (*C.perfstat_process_t)(C.malloc(plen))
+	defer C.free(unsafe.Pointer(proc))
+	// Empty name presumably starts enumeration at the first process.
+	// NOTE(review): the string allocated by C.CString is never freed (leak).
+	C.strcpy(&first.name[0], C.CString(""))
+	r := C.perfstat_process(&first, proc, C.sizeof_perfstat_process_t, numproc)
+	if r < 0 {
+		return nil, fmt.Errorf("perfstat_process() error")
+	}
+
+	ps := make([]Process, r)
+	for i := 0; i < int(r); i++ {
+		// get_process_stat (c_helpers.h) indexes into the C array; a nil
+		// entry leaves the zero value in the result slice.
+		p := C.get_process_stat(proc, C.int(i))
+		if p != nil {
+			ps[i] = perfstatprocess2process(p)
+		}
+	}
+	return ps, nil
+}
+
+// ThreadStat returns statistics for all kernel threads on the system,
+// collected via perfstat_thread(3).
+func ThreadStat() ([]Thread, error) {
+	var thread *C.perfstat_thread_t
+	var first C.perfstat_id_t
+
+	// A NULL-buffer call only queries the number of threads.
+	numthr := C.perfstat_thread(nil, nil, C.sizeof_perfstat_thread_t, 0)
+	if numthr < 1 {
+		return nil, fmt.Errorf("perfstat_thread() error")
+	}
+
+	// C buffer large enough for all entries; released on return.
+	thlen := C.sizeof_perfstat_thread_t * C.ulong(numthr)
+	thread = (*C.perfstat_thread_t)(C.malloc(thlen))
+	defer C.free(unsafe.Pointer(thread))
+	// Empty name presumably starts enumeration at the first thread.
+	// NOTE(review): the string allocated by C.CString is never freed (leak).
+	C.strcpy(&first.name[0], C.CString(""))
+	r := C.perfstat_thread(&first, thread, C.sizeof_perfstat_thread_t, numthr)
+	if r < 0 {
+		return nil, fmt.Errorf("perfstat_thread() error")
+	}
+
+	th := make([]Thread, r)
+	for i := 0; i < int(r); i++ {
+		// get_thread_stat (c_helpers.h) indexes into the C array; a nil
+		// entry leaves the zero value in the result slice.
+		t := C.get_thread_stat(thread, C.int(i))
+		if t != nil {
+			th[i] = perfstatthread2thread(t)
+		}
+	}
+	return th, nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/sysconf.go b/test/integration/vendor/github.com/power-devops/perfstat/sysconf.go
new file mode 100644
index 000000000..c7454d03d
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/sysconf.go
@@ -0,0 +1,195 @@
+// +build aix
+
+package perfstat
+
+/*
+#include
+*/
+import "C"
+
+import "fmt"
+
+const (
+ SC_ARG_MAX = 0
+ SC_CHILD_MAX = 1
+ SC_CLK_TCK = 2
+ SC_NGROUPS_MAX = 3
+ SC_OPEN_MAX = 4
+ SC_STREAM_MAX = 5
+ SC_TZNAME_MAX = 6
+ SC_JOB_CONTROL = 7
+ SC_SAVED_IDS = 8
+ SC_VERSION = 9
+ SC_POSIX_ARG_MAX = 10
+ SC_POSIX_CHILD_MAX = 11
+ SC_POSIX_LINK_MAX = 12
+ SC_POSIX_MAX_CANON = 13
+ SC_POSIX_MAX_INPUT = 14
+ SC_POSIX_NAME_MAX = 15
+ SC_POSIX_NGROUPS_MAX = 16
+ SC_POSIX_OPEN_MAX = 17
+ SC_POSIX_PATH_MAX = 18
+ SC_POSIX_PIPE_BUF = 19
+ SC_POSIX_SSIZE_MAX = 20
+ SC_POSIX_STREAM_MAX = 21
+ SC_POSIX_TZNAME_MAX = 22
+ SC_BC_BASE_MAX = 23
+ SC_BC_DIM_MAX = 24
+ SC_BC_SCALE_MAX = 25
+ SC_BC_STRING_MAX = 26
+ SC_EQUIV_CLASS_MAX = 27
+ SC_EXPR_NEST_MAX = 28
+ SC_LINE_MAX = 29
+ SC_RE_DUP_MAX = 30
+ SC_2_VERSION = 31
+ SC_2_C_DEV = 32
+ SC_2_FORT_DEV = 33
+ SC_2_FORT_RUN = 34
+ SC_2_LOCALEDEF = 35
+ SC_2_SW_DEV = 36
+ SC_POSIX2_BC_BASE_MAX = 37
+ SC_POSIX2_BC_DIM_MAX = 38
+ SC_POSIX2_BC_SCALE_MAX = 39
+ SC_POSIX2_BC_STRING_MAX = 40
+ SC_POSIX2_BC_EQUIV_CLASS_MAX = 41
+ SC_POSIX2_BC_EXPR_NEST_MAX = 42
+ SC_POSIX2_BC_LINE_MAX = 43
+ SC_POSIX2_BC_RE_DUP_MAX = 44
+ SC_PASS_MAX = 45
+ SC_XOPEN_VERSION = 46
+ SC_ATEXIT_MAX = 47
+ SC_PAGE_SIZE = 48
+ SC_PAGESIZE = SC_PAGE_SIZE
+ SC_AES_OS_VERSION = 49
+ SC_COLL_WEIGHTS_MAX = 50
+ SC_2_C_WIND = 51
+ SC_2_C_VERSION = 52
+ SC_2_UPE = 53
+ SC_2_CHAR_TERM = 54
+ SC_XOPEN_SHM = 55
+ SC_XOPEN_CRYPT = 56
+ SC_XOPEN_ENH_I18N = 57
+ SC_IOV_MAX = 58
+ SC_THREAD_SAFE_FUNCTIONS = 59
+ SC_THREADS = 60
+ SC_THREAD_ATTR_STACKADDR = 61
+ SC_THREAD_ATTR_STACKSIZE = 62
+ SC_THREAD_FORKALL = 63
+ SC_THREAD_PRIORITY_SCHEDULING = 64
+ SC_THREAD_PRIO_INHERIT = 65
+ SC_THREAD_PRIO_PROTECT = 66
+ SC_THREAD_PROCESS_SHARED = 67
+ SC_THREAD_KEYS_MAX = 68
+ SC_THREAD_DATAKEYS_MAX = SC_THREAD_KEYS_MAX
+ SC_THREAD_STACK_MIN = 69
+ SC_THREAD_THREADS_MAX = 70
+ SC_NPROCESSORS_CONF = 71
+ SC_NPROCESSORS_ONLN = 72
+ SC_XOPEN_UNIX = 73
+ SC_AIO_LISTIO_MAX = 75
+ SC_AIO_MAX = 76
+ SC_AIO_PRIO_DELTA_MAX = 77
+ SC_ASYNCHRONOUS_IO = 78
+ SC_DELAYTIMER_MAX = 79
+ SC_FSYNC = 80
+ SC_GETGR_R_SIZE_MAX = 81
+ SC_GETPW_R_SIZE_MAX = 82
+ SC_LOGIN_NAME_MAX = 83
+ SC_MAPPED_FILES = 84
+ SC_MEMLOCK = 85
+ SC_MEMLOCK_RANGE = 86
+ SC_MEMORY_PROTECTION = 87
+ SC_MESSAGE_PASSING = 88
+ SC_MQ_OPEN_MAX = 89
+ SC_MQ_PRIO_MAX = 90
+ SC_PRIORITIZED_IO = 91
+ SC_PRIORITY_SCHEDULING = 92
+ SC_REALTIME_SIGNALS = 93
+ SC_RTSIG_MAX = 94
+ SC_SEMAPHORES = 95
+ SC_SEM_NSEMS_MAX = 96
+ SC_SEM_VALUE_MAX = 97
+ SC_SHARED_MEMORY_OBJECTS = 98
+ SC_SIGQUEUE_MAX = 99
+ SC_SYNCHRONIZED_IO = 100
+ SC_THREAD_DESTRUCTOR_ITERATIONS = 101
+ SC_TIMERS = 102
+ SC_TIMER_MAX = 103
+ SC_TTY_NAME_MAX = 104
+ SC_XBS5_ILP32_OFF32 = 105
+ SC_XBS5_ILP32_OFFBIG = 106
+ SC_XBS5_LP64_OFF64 = 107
+ SC_XBS5_LPBIG_OFFBIG = 108
+ SC_XOPEN_XCU_VERSION = 109
+ SC_XOPEN_REALTIME = 110
+ SC_XOPEN_REALTIME_THREADS = 111
+ SC_XOPEN_LEGACY = 112
+ SC_REENTRANT_FUNCTIONS = SC_THREAD_SAFE_FUNCTIONS
+ SC_PHYS_PAGES = 113
+ SC_AVPHYS_PAGES = 114
+ SC_LPAR_ENABLED = 115
+ SC_LARGE_PAGESIZE = 116
+ SC_AIX_KERNEL_BITMODE = 117
+ SC_AIX_REALMEM = 118
+ SC_AIX_HARDWARE_BITMODE = 119
+ SC_AIX_MP_CAPABLE = 120
+ SC_V6_ILP32_OFF32 = 121
+ SC_V6_ILP32_OFFBIG = 122
+ SC_V6_LP64_OFF64 = 123
+ SC_V6_LPBIG_OFFBIG = 124
+ SC_XOPEN_STREAMS = 125
+ SC_HOST_NAME_MAX = 126
+ SC_REGEXP = 127
+ SC_SHELL = 128
+ SC_SYMLOOP_MAX = 129
+ SC_ADVISORY_INFO = 130
+ SC_FILE_LOCKING = 131
+ SC_2_PBS = 132
+ SC_2_PBS_ACCOUNTING = 133
+ SC_2_PBS_CHECKPOINT = 134
+ SC_2_PBS_LOCATE = 135
+ SC_2_PBS_MESSAGE = 136
+ SC_2_PBS_TRACK = 137
+ SC_BARRIERS = 138
+ SC_CLOCK_SELECTION = 139
+ SC_CPUTIME = 140
+ SC_MONOTONIC_CLOCK = 141
+ SC_READER_WRITER_LOCKS = 142
+ SC_SPAWN = 143
+ SC_SPIN_LOCKS = 144
+ SC_SPORADIC_SERVER = 145
+ SC_THREAD_CPUTIME = 146
+ SC_THREAD_SPORADIC_SERVER = 147
+ SC_TIMEOUTS = 148
+ SC_TRACE = 149
+ SC_TRACE_EVENT_FILTER = 150
+ SC_TRACE_INHERIT = 151
+ SC_TRACE_LOG = 152
+ SC_TYPED_MEMORY_OBJECTS = 153
+ SC_IPV6 = 154
+ SC_RAW_SOCKETS = 155
+ SC_SS_REPL_MAX = 156
+ SC_TRACE_EVENT_NAME_MAX = 157
+ SC_TRACE_NAME_MAX = 158
+ SC_TRACE_SYS_MAX = 159
+ SC_TRACE_USER_EVENT_MAX = 160
+ SC_AIX_UKEYS = 161
+ SC_AIX_ENHANCED_AFFINITY = 162
+ SC_V7_ILP32_OFF32 = 163
+ SC_V7_ILP32_OFFBIG = 164
+ SC_V7_LP64_OFF64 = 165
+ SC_V7_LPBIG_OFFBIG = 166
+ SC_THREAD_ROBUST_PRIO_INHERIT = 167
+ SC_THREAD_ROBUST_PRIO_PROTECT = 168
+ SC_XOPEN_UUCP = 169
+ SC_XOPEN_ARMOR = 170
+)
+
+// Sysconf wraps the libc sysconf(3) call and returns the value of the
+// given SC_* configuration variable, or an error when sysconf returns -1.
+// NOTE(review): sysconf also returns -1 for valid-but-unsupported names;
+// those are indistinguishable from errors here.
+func Sysconf(name int32) (int64, error) {
+	r := C.sysconf(C.int(name))
+	if r == -1 {
+		return 0, fmt.Errorf("sysconf error")
+	}
+	return int64(r), nil
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/systemcfg.go b/test/integration/vendor/github.com/power-devops/perfstat/systemcfg.go
new file mode 100644
index 000000000..6287eb46a
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/systemcfg.go
@@ -0,0 +1,635 @@
+// +build aix
+
+package perfstat
+
+import "golang.org/x/sys/unix"
+
+// function Getsystemcfg() is defined in golang.org/x/sys/unix
+// we define here just missing constants for the function and some helpers
+
+// Calls to getsystemcfg()
+const (
+ SC_ARCH = 1 /* processor architecture */
+ SC_IMPL = 2 /* processor implementation */
+ SC_VERS = 3 /* processor version */
+ SC_WIDTH = 4 /* width (32 || 64) */
+ SC_NCPUS = 5 /* 1 = UP, n = n-way MP */
+ SC_L1C_ATTR = 6 /* L1 cache attributes (bit flags) */
+ SC_L1C_ISZ = 7 /* size of L1 instruction cache */
+ SC_L1C_DSZ = 8 /* size of L1 data cache */
+ SC_L1C_ICA = 9 /* L1 instruction cache associativity */
+ SC_L1C_DCA = 10 /* L1 data cache associativity */
+ SC_L1C_IBS = 11 /* L1 instruction cache block size */
+ SC_L1C_DBS = 12 /* L1 data cache block size */
+ SC_L1C_ILS = 13 /* L1 instruction cache line size */
+ SC_L1C_DLS = 14 /* L1 data cache line size */
+ SC_L2C_SZ = 15 /* size of L2 cache, 0 = No L2 cache */
+ SC_L2C_AS = 16 /* L2 cache associativity */
+ SC_TLB_ATTR = 17 /* TLB attributes (bit flags) */
+ SC_ITLB_SZ = 18 /* entries in instruction TLB */
+ SC_DTLB_SZ = 19 /* entries in data TLB */
+ SC_ITLB_ATT = 20 /* instruction tlb associativity */
+ SC_DTLB_ATT = 21 /* data tlb associativity */
+ SC_RESRV_SZ = 22 /* size of reservation */
+ SC_PRI_LC = 23 /* spin lock count in supevisor mode */
+ SC_PRO_LC = 24 /* spin lock count in problem state */
+ SC_RTC_TYPE = 25 /* RTC type */
+ SC_VIRT_AL = 26 /* 1 if hardware aliasing is supported */
+ SC_CAC_CONG = 27 /* number of page bits for cache synonym */
+ SC_MOD_ARCH = 28 /* used by system for model determination */
+ SC_MOD_IMPL = 29 /* used by system for model determination */
+ SC_XINT = 30 /* used by system for time base conversion */
+ SC_XFRAC = 31 /* used by system for time base conversion */
+ SC_KRN_ATTR = 32 /* kernel attributes, see below */
+ SC_PHYSMEM = 33 /* bytes of OS available memory */
+ SC_SLB_ATTR = 34 /* SLB attributes */
+ SC_SLB_SZ = 35 /* size of slb (0 = no slb) */
+ SC_ORIG_NCPUS = 36 /* original number of CPUs */
+ SC_MAX_NCPUS = 37 /* max cpus supported by this AIX image */
+ SC_MAX_REALADDR = 38 /* max supported real memory address +1 */
+ SC_ORIG_ENT_CAP = 39 /* configured entitled processor capacity at boot required by cross-partition LPAR tools. */
+ SC_ENT_CAP = 40 /* entitled processor capacity */
+ SC_DISP_WHE = 41 /* Dispatch wheel time period (TB units) */
+ SC_CAPINC = 42 /* delta by which capacity can change */
+ SC_VCAPW = 43 /* priority weight for idle capacity distribution */
+ SC_SPLP_STAT = 44 /* State of SPLPAR enablement: 0x1 => 1=SPLPAR capable; 0=not, 0x2 => SPLPAR enabled 0=dedicated, 1=shared */
+ SC_SMT_STAT = 45 /* State of SMT enablement: 0x1 = SMT Capable 0=no/1=yes, 0x2 = SMT Enabled 0=no/1=yes, 0x4 = SMT threads bound true 0=no/1=yes */
+ SC_SMT_TC = 46 /* Number of SMT Threads per Physical CPU */
+ SC_VMX_VER = 47 /* RPA defined VMX version: 0 = VMX not available or disabled, 1 = VMX capable, 2 = VMX and VSX capable */
+ SC_LMB_SZ = 48 /* Size of an LMB on this system. */
+ SC_MAX_XCPU = 49 /* Number of exclusive cpus on line */
+ SC_EC_LVL = 50 /* Kernel error checking level */
+ SC_AME_STAT = 51 /* AME status */
+ SC_ECO_STAT = 52 /* extended cache options */
+ SC_DFP_STAT = 53 /* RPA defined DFP version, 0=none/disabled */
+ SC_VRM_STAT = 54 /* VRM Capable/enabled */
+ SC_PHYS_IMP = 55 /* physical processor implementation */
+ SC_PHYS_VER = 56 /* physical processor version */
+ SC_SPCM_STATUS = 57
+ SC_SPCM_MAX = 58
+ SC_TM_VER = 59 /* Transaction Memory version, 0 - not capable */
+ SC_NX_CAP = 60 /* NX GZIP capable */
+ SC_PKS_STATE = 61 /* Platform KeyStore */
+)
+
+/* kernel attributes */
+/* bit 0/1 meaning */
+/* -----------------------------------------*/
+/* 31 32-bit kernel / 64-bit kernel */
+/* 30 non-LPAR / LPAR */
+/* 29 old 64bit ABI / 64bit Large ABI */
+/* 28 non-NUMA / NUMA */
+/* 27 UP / MP */
+/* 26 no DR CPU add / DR CPU add support */
+/* 25 no DR CPU rm / DR CPU rm support */
+/* 24 no DR MEM add / DR MEM add support */
+/* 23 no DR MEM rm / DR MEM rm support */
+/* 22 kernel keys disabled / enabled */
+/* 21 no recovery / recovery enabled */
+/* 20 non-MLS / MLS enabled */
+/* 19 enhanced affinity indicator */
+/* 18 non-vTPM / vTPM enabled */
+/* 17 non-VIOS / VIOS */
+
+// Values for architecture field
+const (
+ ARCH_POWER_RS = 0x0001 /* Power Classic architecture */
+ ARCH_POWER_PC = 0x0002 /* Power PC architecture */
+ ARCH_IA64 = 0x0003 /* Intel IA64 architecture */
+)
+
+// Values for implementation field for POWER_PC Architectures
+const (
+ IMPL_POWER_RS1 = 0x00001 /* RS1 class CPU */
+ IMPL_POWER_RSC = 0x00002 /* RSC class CPU */
+ IMPL_POWER_RS2 = 0x00004 /* RS2 class CPU */
+ IMPL_POWER_601 = 0x00008 /* 601 class CPU */
+ IMPL_POWER_603 = 0x00020 /* 603 class CPU */
+ IMPL_POWER_604 = 0x00010 /* 604 class CPU */
+ IMPL_POWER_620 = 0x00040 /* 620 class CPU */
+ IMPL_POWER_630 = 0x00080 /* 630 class CPU */
+ IMPL_POWER_A35 = 0x00100 /* A35 class CPU */
+ IMPL_POWER_RS64II = 0x0200 /* RS64-II class CPU */
+ IMPL_POWER_RS64III = 0x0400 /* RS64-III class CPU */
+ IMPL_POWER4 = 0x0800 /* 4 class CPU */
+ IMPL_POWER_RS64IV = IMPL_POWER4 /* 4 class CPU */
+ IMPL_POWER_MPC7450 = 0x1000 /* MPC7450 class CPU */
+ IMPL_POWER5 = 0x2000 /* 5 class CPU */
+ IMPL_POWER6 = 0x4000 /* 6 class CPU */
+ IMPL_POWER7 = 0x8000 /* 7 class CPU */
+ IMPL_POWER8 = 0x10000 /* 8 class CPU */
+ IMPL_POWER9 = 0x20000 /* 9 class CPU */
+)
+
+// Values for implementation field for IA64 Architectures
+const (
+ IMPL_IA64_M1 = 0x0001 /* IA64 M1 class CPU (Itanium) */
+ IMPL_IA64_M2 = 0x0002 /* IA64 M2 class CPU */
+)
+
+// Values for the version field
+const (
+ PV_601 = 0x010001 /* Power PC 601 */
+ PV_601A = 0x010002 /* Power PC 601 */
+ PV_603 = 0x060000 /* Power PC 603 */
+ PV_604 = 0x050000 /* Power PC 604 */
+ PV_620 = 0x070000 /* Power PC 620 */
+ PV_630 = 0x080000 /* Power PC 630 */
+ PV_A35 = 0x090000 /* Power PC A35 */
+ PV_RS64II = 0x0A0000 /* Power PC RS64II */
+ PV_RS64III = 0x0B0000 /* Power PC RS64III */
+ PV_4 = 0x0C0000 /* Power PC 4 */
+ PV_RS64IV = PV_4 /* Power PC 4 */
+ PV_MPC7450 = 0x0D0000 /* Power PC MPC7450 */
+ PV_4_2 = 0x0E0000 /* Power PC 4 */
+ PV_4_3 = 0x0E0001 /* Power PC 4 */
+ PV_5 = 0x0F0000 /* Power PC 5 */
+ PV_5_2 = 0x0F0001 /* Power PC 5 */
+ PV_5_3 = 0x0F0002 /* Power PC 5 */
+ PV_6 = 0x100000 /* Power PC 6 */
+ PV_6_1 = 0x100001 /* Power PC 6 DD1.x */
+ PV_7 = 0x200000 /* Power PC 7 */
+ PV_8 = 0x300000 /* Power PC 8 */
+ PV_9 = 0x400000 /* Power PC 9 */
+ PV_5_Compat = 0x0F8000 /* Power PC 5 */
+ PV_6_Compat = 0x108000 /* Power PC 6 */
+ PV_7_Compat = 0x208000 /* Power PC 7 */
+ PV_8_Compat = 0x308000 /* Power PC 8 */
+ PV_9_Compat = 0x408000 /* Power PC 9 */
+ PV_RESERVED_2 = 0x0A0000 /* source compatability */
+ PV_RESERVED_3 = 0x0B0000 /* source compatability */
+ PV_RS2 = 0x040000 /* Power RS2 */
+ PV_RS1 = 0x020000 /* Power RS1 */
+ PV_RSC = 0x030000 /* Power RSC */
+ PV_M1 = 0x008000 /* Intel IA64 M1 */
+ PV_M2 = 0x008001 /* Intel IA64 M2 */
+)
+
+// Values for rtc_type
+const (
+ RTC_POWER = 1 /* rtc as defined by Power Arch. */
+ RTC_POWER_PC = 2 /* rtc as defined by Power PC Arch. */
+ RTC_IA64 = 3 /* rtc as defined by IA64 Arch. */
+)
+
+const NX_GZIP_PRESENT = 0x00000001
+
+const (
+ PKS_STATE_CAPABLE = 1
+ PKS_STATE_ENABLED = 2
+)
+
+// Macros for identifying physical processor
+const (
+ PPI4_1 = 0x35
+ PPI4_2 = 0x38
+ PPI4_3 = 0x39
+ PPI4_4 = 0x3C
+ PPI4_5 = 0x44
+ PPI5_1 = 0x3A
+ PPI5_2 = 0x3B
+ PPI6_1 = 0x3E
+ PPI7_1 = 0x3F
+ PPI7_2 = 0x4A
+ PPI8_1 = 0x4B
+ PPI8_2 = 0x4D
+ PPI9 = 0x4E
+)
+
+// Macros for kernel attributes
+const (
+ KERN_TYPE = 0x1
+ KERN_LPAR = 0x2
+ KERN_64BIT_LARGE_ABI = 0x4
+ KERN_NUMA = 0x8
+ KERN_UPMP = 0x10
+ KERN_DR_CPU_ADD = 0x20
+ KERN_DR_CPU_RM = 0x40
+ KERN_DR_MEM_ADD = 0x80
+ KERN_DR_MEM_RM = 0x100
+ KERN_KKEY_ENABLED = 0x200
+ KERN_RECOVERY = 0x400
+ KERN_MLS = 0x800
+ KERN_ENH_AFFINITY = 0x1000
+ KERN_VTPM = 0x2000
+ KERN_VIOS = 0x4000
+)
+
+// macros for SPLPAR environment.
+const (
+ SPLPAR_CAPABLE = 0x1
+ SPLPAR_ENABLED = 0x2
+ SPLPAR_DONATE_CAPABLE = 0x4
+)
+
+// Macros for SMT status determination
+const (
+ SMT_CAPABLE = 0x1
+ SMT_ENABLE = 0x2
+ SMT_BOUND = 0x4
+ SMT_ORDER = 0x8
+)
+
+// Macros for VRM status determination
+const (
+ VRM_CAPABLE = 0x1
+ VRM_ENABLE = 0x2
+ CMOX_CAPABLE = 0x4
+)
+
+// Macros for AME status determination
+const AME_ENABLE = 0x1
+
+// Macros for extended cache options
+const (
+ ECO_CAPABLE = 0x1
+ ECO_ENABLE = 0x2
+)
+
+// These define blocks of values for model_arch and model_impl that are reserved for OEM use.
+const (
+ MODEL_ARCH_RSPC = 2
+ MODEL_ARCH_CHRP = 3
+ MODEL_ARCH_IA64 = 4
+ MODEL_ARCH_OEM_START = 1024
+ MODEL_ARCH_OEM_END = 2047
+ MODEL_IMPL_RS6K_UP_MCA = 1
+ MODEL_IMPL_RS6K_SMP_MCA = 2
+ MODEL_IMPL_RSPC_UP_PCI = 3
+ MODEL_IMPL_RSPC_SMP_PCI = 4
+ MODEL_IMPL_CHRP_UP_PCI = 5
+ MODEL_IMPL_CHRP_SMP_PCI = 6
+ MODEL_IMPL_IA64_COM = 7
+ MODEL_IMPL_IA64_SOFTSDV = 8
+ MODEL_IMPL_MAMBO_SIM = 9
+ MODEL_IMPL_POWER_KVM = 10
+ MODEL_IMPL_OEM_START = 1024
+ MODEL_IMPL_OEM_END = 2047
+)
+
+// example determining processor compatibility mode on AIX:
+// impl := unix.Getsystemcfg(SC_IMPL)
+// if impl&IMPL_POWER8 != 0 {
+// // we are running on POWER8
+// }
+// if impl&IMPL_POWER9 != 0 {
+// // we are running on POWER9
+// }
+
+// GetCPUImplementation returns a human-readable name for the processor
+// family reported by getsystemcfg(SC_IMPL), or "Unknown".
+func GetCPUImplementation() string {
+	impl := unix.Getsystemcfg(SC_IMPL)
+	// Checked in the same order as the original switch.
+	table := []struct {
+		mask uint64
+		name string
+	}{
+		{IMPL_POWER4, "POWER4"},
+		{IMPL_POWER5, "POWER5"},
+		{IMPL_POWER6, "POWER6"},
+		{IMPL_POWER7, "POWER7"},
+		{IMPL_POWER8, "POWER8"},
+		{IMPL_POWER9, "POWER9"},
+	}
+	for _, e := range table {
+		if impl&e.mask != 0 {
+			return e.name
+		}
+	}
+	return "Unknown"
+}
+
+// POWER9OrNewer reports whether the processor is POWER9 or later; POWER9
+// is the newest implementation this package knows about.
+func POWER9OrNewer() bool {
+	return unix.Getsystemcfg(SC_IMPL)&IMPL_POWER9 != 0
+}
+
+// POWER9 reports whether the processor implementation is POWER9.
+func POWER9() bool {
+	return unix.Getsystemcfg(SC_IMPL)&IMPL_POWER9 != 0
+}
+
+// POWER8OrNewer reports whether the processor is POWER8 or later.
+func POWER8OrNewer() bool {
+	return unix.Getsystemcfg(SC_IMPL)&(IMPL_POWER9|IMPL_POWER8) != 0
+}
+
+// POWER8 reports whether the processor implementation is POWER8.
+func POWER8() bool {
+	return unix.Getsystemcfg(SC_IMPL)&IMPL_POWER8 != 0
+}
+
+// POWER7OrNewer reports whether the processor is POWER7 or later.
+func POWER7OrNewer() bool {
+	return unix.Getsystemcfg(SC_IMPL)&(IMPL_POWER9|IMPL_POWER8|IMPL_POWER7) != 0
+}
+
+// POWER7 reports whether the processor implementation is POWER7.
+func POWER7() bool {
+	return unix.Getsystemcfg(SC_IMPL)&IMPL_POWER7 != 0
+}
+
+// HasTransactionalMemory reports whether the CPU supports transactional
+// memory (SC_TM_VER: 0 means not capable).
+func HasTransactionalMemory() bool {
+	return unix.Getsystemcfg(SC_TM_VER) > 0
+}
+
+// Is64Bit reports whether the processor width (SC_WIDTH) is 64 bits.
+func Is64Bit() bool {
+	return unix.Getsystemcfg(SC_WIDTH) == 64
+}
+
+// IsSMP reports whether more than one logical CPU is active (SC_NCPUS).
+func IsSMP() bool {
+	return unix.Getsystemcfg(SC_NCPUS) > 1
+}
+
+// HasVMX reports whether VMX is available (SC_VMX_VER: 0 = not available
+// or disabled).
+func HasVMX() bool {
+	return unix.Getsystemcfg(SC_VMX_VER) > 0
+}
+
+// HasVSX reports whether VSX is available (SC_VMX_VER = 2 means VMX and
+// VSX capable).
+func HasVSX() bool {
+	return unix.Getsystemcfg(SC_VMX_VER) > 1
+}
+
+// HasDFP reports whether decimal floating point is available.
+// NOTE(review): the original compares > 1 even though SC_DFP_STAT is
+// documented as "0=none/disabled"; behavior is kept as-is — confirm.
+func HasDFP() bool {
+	return unix.Getsystemcfg(SC_DFP_STAT) > 1
+}
+
+// HasNxGzip reports whether the NX GZIP accelerator is present (SC_NX_CAP).
+func HasNxGzip() bool {
+	return unix.Getsystemcfg(SC_NX_CAP)&NX_GZIP_PRESENT != 0
+}
+
+// PksCapable reports whether the Platform KeyStore is supported
+// (SC_PKS_STATE & PKS_STATE_CAPABLE).
+func PksCapable() bool {
+	return unix.Getsystemcfg(SC_PKS_STATE)&PKS_STATE_CAPABLE != 0
+}
+
+// PksEnabled reports whether the Platform KeyStore is enabled
+// (SC_PKS_STATE & PKS_STATE_ENABLED).
+func PksEnabled() bool {
+	return unix.Getsystemcfg(SC_PKS_STATE)&PKS_STATE_ENABLED != 0
+}
+
+// CPUMode returns the processor compatibility mode name derived from the
+// processor version word (SC_VERS), or "Unknown".
+func CPUMode() string {
+	vers := unix.Getsystemcfg(SC_VERS)
+	if vers == PV_9 || vers == PV_9_Compat {
+		return "POWER9"
+	}
+	if vers == PV_8 || vers == PV_8_Compat {
+		return "POWER8"
+	}
+	if vers == PV_7 || vers == PV_7_Compat {
+		return "POWER7"
+	}
+	return "Unknown"
+}
+
+// KernelBits returns 64 when a 64-bit kernel is running, otherwise 32
+// (KERN_TYPE bit of the SC_KRN_ATTR kernel attributes word).
+func KernelBits() int {
+	if unix.Getsystemcfg(SC_KRN_ATTR)&KERN_TYPE == 0 {
+		return 32
+	}
+	return 64
+}
+
+// IsLPAR reports whether the kernel runs inside an LPAR
+// (SC_KRN_ATTR & KERN_LPAR).
+func IsLPAR() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_LPAR != 0
+}
+
+// CpuAddCapable reports whether dynamic CPU addition is supported
+// (SC_KRN_ATTR & KERN_DR_CPU_ADD).
+func CpuAddCapable() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_DR_CPU_ADD != 0
+}
+
+// CpuRemoveCapable reports whether dynamic CPU removal is supported
+// (SC_KRN_ATTR & KERN_DR_CPU_RM).
+func CpuRemoveCapable() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_DR_CPU_RM != 0
+}
+
+// MemoryAddCapable reports whether dynamic memory addition is supported
+// (SC_KRN_ATTR & KERN_DR_MEM_ADD).
+func MemoryAddCapable() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_DR_MEM_ADD != 0
+}
+
+// MemoryRemoveCapable reports whether dynamic memory removal is supported
+// (SC_KRN_ATTR & KERN_DR_MEM_RM).
+func MemoryRemoveCapable() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_DR_MEM_RM != 0
+}
+
+// DLparCapable reports whether any dynamic LPAR operation (CPU or memory
+// add/remove) is supported.
+func DLparCapable() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&(KERN_DR_CPU_ADD|KERN_DR_CPU_RM|KERN_DR_MEM_ADD|KERN_DR_MEM_RM) != 0
+}
+
+// IsNUMA reports whether the kernel is NUMA-aware (SC_KRN_ATTR & KERN_NUMA).
+func IsNUMA() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_NUMA != 0
+}
+
+// KernelKeys reports whether kernel keys are enabled
+// (SC_KRN_ATTR & KERN_KKEY_ENABLED).
+func KernelKeys() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_KKEY_ENABLED != 0
+}
+
+// RecoveryMode reports whether kernel recovery is enabled
+// (SC_KRN_ATTR & KERN_RECOVERY).
+func RecoveryMode() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_RECOVERY != 0
+}
+
+// EnhancedAffinity reports whether the enhanced-affinity indicator is set
+// (SC_KRN_ATTR & KERN_ENH_AFFINITY).
+func EnhancedAffinity() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_ENH_AFFINITY != 0
+}
+
+// VTpmEnabled reports whether a virtual TPM is enabled
+// (SC_KRN_ATTR & KERN_VTPM).
+func VTpmEnabled() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_VTPM != 0
+}
+
+// IsVIOS reports whether this partition is a VIOS
+// (SC_KRN_ATTR & KERN_VIOS).
+func IsVIOS() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_VIOS != 0
+}
+
+// MLSEnabled reports whether MLS is enabled (SC_KRN_ATTR & KERN_MLS).
+func MLSEnabled() bool {
+	return unix.Getsystemcfg(SC_KRN_ATTR)&KERN_MLS != 0
+}
+
+// SPLparCapable reports whether the partition can run in shared-processor
+// mode (SC_SPLP_STAT & SPLPAR_CAPABLE).
+func SPLparCapable() bool {
+	return unix.Getsystemcfg(SC_SPLP_STAT)&SPLPAR_CAPABLE != 0
+}
+
+// SPLparEnabled reports whether shared-processor mode is enabled
+// (SC_SPLP_STAT & SPLPAR_ENABLED).
+func SPLparEnabled() bool {
+	return unix.Getsystemcfg(SC_SPLP_STAT)&SPLPAR_ENABLED != 0
+}
+
+// DedicatedLpar reports whether the partition uses dedicated (not shared)
+// processors; it is simply the inverse of SPLparEnabled.
+func DedicatedLpar() bool {
+	return !SPLparEnabled()
+}
+
+// SPLparCapped reports whether the shared partition is capped, i.e. has a
+// zero idle-capacity distribution weight (SC_VCAPW == 0).
+func SPLparCapped() bool {
+	return unix.Getsystemcfg(SC_VCAPW) == 0
+}
+
+// SPLparDonating reports whether the partition can donate idle cycles
+// (SC_SPLP_STAT & SPLPAR_DONATE_CAPABLE).
+func SPLparDonating() bool {
+	return unix.Getsystemcfg(SC_SPLP_STAT)&SPLPAR_DONATE_CAPABLE != 0
+}
+
+// SmtCapable reports whether the processor supports SMT
+// (SC_SMT_STAT & SMT_CAPABLE).
+func SmtCapable() bool {
+	return unix.Getsystemcfg(SC_SMT_STAT)&SMT_CAPABLE != 0
+}
+
+// SmtEnabled reports whether SMT is enabled (SC_SMT_STAT & SMT_ENABLE).
+func SmtEnabled() bool {
+	return unix.Getsystemcfg(SC_SMT_STAT)&SMT_ENABLE != 0
+}
+
+// VrmCapable reports whether VRM is supported (SC_VRM_STAT & VRM_CAPABLE).
+func VrmCapable() bool {
+	return unix.Getsystemcfg(SC_VRM_STAT)&VRM_CAPABLE != 0
+}
+
+// VrmEnabled reports whether VRM is enabled (SC_VRM_STAT & VRM_ENABLE).
+func VrmEnabled() bool {
+	return unix.Getsystemcfg(SC_VRM_STAT)&VRM_ENABLE != 0
+}
+
+// AmeEnabled reports whether Active Memory Expansion is enabled
+// (SC_AME_STAT & AME_ENABLE).
+func AmeEnabled() bool {
+	return unix.Getsystemcfg(SC_AME_STAT)&AME_ENABLE != 0
+}
+
+// EcoCapable reports whether extended cache options are supported
+// (SC_ECO_STAT & ECO_CAPABLE).
+func EcoCapable() bool {
+	return unix.Getsystemcfg(SC_ECO_STAT)&ECO_CAPABLE != 0
+}
+
+// EcoEnabled reports whether extended cache options are enabled
+// (SC_ECO_STAT & ECO_ENABLE).
+func EcoEnabled() bool {
+	return unix.Getsystemcfg(SC_ECO_STAT)&ECO_ENABLE != 0
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_cpu.go b/test/integration/vendor/github.com/power-devops/perfstat/types_cpu.go
new file mode 100644
index 000000000..84425e92f
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_cpu.go
@@ -0,0 +1,186 @@
+package perfstat
+
+type CPU struct {
+ Name string /* logical processor name (cpu0, cpu1, ..) */
+ User int64 /* raw number of clock ticks spent in user mode */
+ Sys int64 /* raw number of clock ticks spent in system mode */
+ Idle int64 /* raw number of clock ticks spent idle */
+ Wait int64 /* raw number of clock ticks spent waiting for I/O */
+ PSwitch int64 /* number of context switches (changes of currently running process) */
+ Syscall int64 /* number of system calls executed */
+ Sysread int64 /* number of read system calls executed */
+ Syswrite int64 /* number of write system calls executed */
+ Sysfork int64 /* number of fork system call executed */
+ Sysexec int64 /* number of exec system call executed */
+ Readch int64 /* number of characters tranferred with read system call */
+ Writech int64 /* number of characters tranferred with write system call */
+ Bread int64 /* number of block reads */
+ Bwrite int64 /* number of block writes */
+ Lread int64 /* number of logical read requests */
+ Lwrite int64 /* number of logical write requests */
+ Phread int64 /* number of physical reads (reads on raw device) */
+ Phwrite int64 /* number of physical writes (writes on raw device) */
+ Iget int64 /* number of inode lookups */
+ Namei int64 /* number of vnode lookup from a path name */
+ Dirblk int64 /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+ Msg int64 /* number of IPC message operations */
+ Sema int64 /* number of IPC semaphore operations */
+ MinFaults int64 /* number of page faults with no I/O */
+ MajFaults int64 /* number of page faults with disk I/O */
+ PUser int64 /* raw number of physical processor tics in user mode */
+ PSys int64 /* raw number of physical processor tics in system mode */
+ PIdle int64 /* raw number of physical processor tics idle */
+ PWait int64 /* raw number of physical processor tics waiting for I/O */
+ RedispSD0 int64 /* number of thread redispatches within the scheduler affinity domain 0 */
+ RedispSD1 int64 /* number of thread redispatches within the scheduler affinity domain 1 */
+ RedispSD2 int64 /* number of thread redispatches within the scheduler affinity domain 2 */
+ RedispSD3 int64 /* number of thread redispatches within the scheduler affinity domain 3 */
+ RedispSD4 int64 /* number of thread redispatches within the scheduler affinity domain 4 */
+ RedispSD5 int64 /* number of thread redispatches within the scheduler affinity domain 5 */
+ MigrationPush int64 /* number of thread migrations from the local runque to another queue due to starvation load balancing */
+ MigrationS3grq int64 /* number of thread migrations from the global runque to the local runque resulting in a move accross scheduling domain 3 */
+ MigrationS3pul int64 /* number of thread migrations from another processor's runque resulting in a move accross scheduling domain 3 */
+ InvolCSwitch int64 /* number of involuntary thread context switches */
+ VolCSwitch int64 /* number of voluntary thread context switches */
+ RunQueue int64 /* number of threads on the runque */
+ Bound int64 /* number of bound threads */
+ DecrIntrs int64 /* number of decrementer tics interrupts */
+ MpcRIntrs int64 /* number of mpc's received interrupts */
+ MpcSIntrs int64 /* number of mpc's sent interrupts */
+ DevIntrs int64 /* number of device interrupts */
+ SoftIntrs int64 /* number of offlevel handlers called */
+ PhantIntrs int64 /* number of phantom interrupts */
+ IdleDonatedPurr int64 /* number of idle cycles donated by a dedicated partition enabled for donation */
+ IdleDonatedSpurr int64 /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+ BusyDonatedPurr int64 /* number of busy cycles donated by a dedicated partition enabled for donation */
+ BusyDonatedSpurr int64 /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+ IdleStolenPurr int64 /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+ IdleStolenSpurr int64 /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+ BusyStolenPurr int64 /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+ BusyStolenSpurr int64 /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+ Hpi int64 /* number of hypervisor page-ins */
+ Hpit int64 /* Time spent in hypervisor page-ins (in nanoseconds)*/
+ PUserSpurr int64 /* number of spurr cycles spent in user mode */
+ PSysSpurr int64 /* number of spurr cycles spent in kernel mode */
+ PIdleSpurr int64 /* number of spurr cycles spent in idle mode */
+ PWaitSpurr int64 /* number of spurr cycles spent in wait mode */
+ SpurrFlag int32 /* set if running in spurr mode */
+ LocalDispatch int64 /* number of local thread dispatches on this logical CPU */
+ NearDispatch int64 /* number of near thread dispatches on this logical CPU */
+ FarDispatch int64 /* number of far thread dispatches on this logical CPU */
+ CSwitches int64 /* Context switches */
+ Version int64 /* version number (1, 2, etc.,) */
+ TbLast int64 /* timebase counter */
+ State int /* Show whether the CPU is offline or online */
+ VtbLast int64 /* Last virtual timebase read */
+ ICountLast int64 /* Last instruction count read */
+}
+
+type CPUTotal struct {
+ NCpus int /* number of active logical processors */
+ NCpusCfg int /* number of configured processors */
+ Description string /* processor description (type/official name) */
+ ProcessorHz int64 /* processor speed in Hz */
+ User int64 /* raw total number of clock ticks spent in user mode */
+ Sys int64 /* raw total number of clock ticks spent in system mode */
+ Idle int64 /* raw total number of clock ticks spent idle */
+ Wait int64 /* raw total number of clock ticks spent waiting for I/O */
+ PSwitch int64 /* number of process switches (change in currently running process) */
+ Syscall int64 /* number of system calls executed */
+ Sysread int64 /* number of read system calls executed */
+ Syswrite int64 /* number of write system calls executed */
+ Sysfork int64 /* number of forks system calls executed */
+ Sysexec int64 /* number of execs system calls executed */
+ Readch int64 /* number of characters tranferred with read system call */
+ Writech int64 /* number of characters tranferred with write system call */
+ DevIntrs int64 /* number of device interrupts */
+ SoftIntrs int64 /* number of software interrupts */
+ Lbolt int64 /* number of ticks since last reboot */
+ LoadAvg1 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. */
+ LoadAvg5 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. */
+ LoadAvg15 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. */
+ RunQueue int64 /* length of the run queue (processes ready) */
+ SwpQueue int64 /* length of the swap queue (processes waiting to be paged in) */
+ Bread int64 /* number of blocks read */
+ Bwrite int64 /* number of blocks written */
+ Lread int64 /* number of logical read requests */
+ Lwrite int64 /* number of logical write requests */
+ Phread int64 /* number of physical reads (reads on raw devices) */
+ Phwrite int64 /* number of physical writes (writes on raw devices) */
+ RunOcc int64 /* updated whenever runque is updated, i.e. the runqueue is occupied. This can be used to compute the simple average of ready processes */
+ SwpOcc int64 /* updated whenever swpque is updated. i.e. the swpqueue is occupied. This can be used to compute the simple average processes waiting to be paged in */
+ Iget int64 /* number of inode lookups */
+ Namei int64 /* number of vnode lookup from a path name */
+ Dirblk int64 /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+ Msg int64 /* number of IPC message operations */
+ Sema int64 /* number of IPC semaphore operations */
+ RcvInt int64 /* number of tty receive interrupts */
+ XmtInt int64 /* number of tyy transmit interrupts */
+ MdmInt int64 /* number of modem interrupts */
+ TtyRawInch int64 /* number of raw input characters */
+ TtyCanInch int64 /* number of canonical input characters (always zero) */
+ TtyRawOutch int64 /* number of raw output characters */
+ Ksched int64 /* number of kernel processes created */
+ Koverf int64 /* kernel process creation attempts where: -the user has forked to their maximum limit -the configuration limit of processes has been reached */
+ Kexit int64 /* number of kernel processes that became zombies */
+ Rbread int64 /* number of remote read requests */
+ Rcread int64 /* number of cached remote reads */
+ Rbwrt int64 /* number of remote writes */
+ Rcwrt int64 /* number of cached remote writes */
+ Traps int64 /* number of traps */
+ NCpusHigh int64 /* index of highest processor online */
+ PUser int64 /* raw number of physical processor tics in user mode */
+ PSys int64 /* raw number of physical processor tics in system mode */
+ PIdle int64 /* raw number of physical processor tics idle */
+ PWait int64 /* raw number of physical processor tics waiting for I/O */
+ DecrIntrs int64 /* number of decrementer tics interrupts */
+ MpcRIntrs int64 /* number of mpc's received interrupts */
+ MpcSIntrs int64 /* number of mpc's sent interrupts */
+ PhantIntrs int64 /* number of phantom interrupts */
+ IdleDonatedPurr int64 /* number of idle cycles donated by a dedicated partition enabled for donation */
+ IdleDonatedSpurr int64 /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+ BusyDonatedPurr int64 /* number of busy cycles donated by a dedicated partition enabled for donation */
+ BusyDonatedSpurr int64 /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+ IdleStolenPurr int64 /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+ IdleStolenSpurr int64 /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+ BusyStolenPurr int64 /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+ BusyStolenSpurr int64 /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+ IOWait int32 /* number of processes that are asleep waiting for buffered I/O */
+ PhysIO int32 /* number of processes waiting for raw I/O */
+ TWait int64 /* number of threads that are waiting for filesystem direct(cio) */
+ Hpi int64 /* number of hypervisor page-ins */
+ Hpit int64 /* Time spent in hypervisor page-ins (in nanoseconds) */
+ PUserSpurr int64 /* number of spurr cycles spent in user mode */
+ PSysSpurr int64 /* number of spurr cycles spent in kernel mode */
+ PIdleSpurr int64 /* number of spurr cycles spent in idle mode */
+ PWaitSpurr int64 /* number of spurr cycles spent in wait mode */
+ SpurrFlag int /* set if running in spurr mode */
+ Version int64 /* version number (1, 2, etc.,) */
+ TbLast int64 /*time base counter */
+ PurrCoalescing int64 /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/
+ SpurrCoalescing int64 /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero. */
+}
+
+type CPUUtil struct {
+ Version int64
+ CpuID string /* holds the id of the cpu */
+ Entitlement float32 /* Partition's entitlement */
+ UserPct float32 /* % of utilization in user mode */
+ KernPct float32 /* % of utilization in kernel mode */
+ IdlePct float32 /* % of utilization in idle mode */
+ WaitPct float32 /* % of utilization in wait mode */
+ PhysicalBusy float32 /* physical cpus busy */
+ PhysicalConsumed float32 /* total cpus consumed by the partition */
+ FreqPct float32 /* Average freq% over the last interval */
+ EntitlementPct float32 /* % of entitlement used */
+ BusyPct float32 /* % of entitlement busy */
+ IdleDonatedPct float32 /* % idle cycles donated */
+ BusyDonatedPct float32 /* % of busy cycles donated */
+ IdleStolenPct float32 /* % idle cycles stolen */
+ BusyStolenPct float32 /* % busy cycles stolen */
+ LUserPct float32 /* % of utilization in user mode, in terms of logical processor ticks */
+ LKernPct float32 /* % of utilization in kernel mode, in terms of logical processor ticks*/
+ LIdlePct float32 /* % of utilization in idle mode, in terms of logical processor ticks */
+ LWaitPct float32 /* % of utilization in wait mode, in terms of logical processor ticks */
+ DeltaTime int64 /* delta time in milliseconds, for which utilization is evaluated */
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_disk.go b/test/integration/vendor/github.com/power-devops/perfstat/types_disk.go
new file mode 100644
index 000000000..ca1493d87
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_disk.go
@@ -0,0 +1,176 @@
+package perfstat
+
+type DiskTotal struct {
+ Number int32 /* total number of disks */
+ Size int64 /* total size of all disks (in MB) */
+ Free int64 /* free portion of all disks (in MB) */
+ XRate int64 /* __rxfers: total number of transfers from disk */
+ Xfers int64 /* total number of transfers to/from disk */
+ Wblks int64 /* 512 bytes blocks written to all disks */
+ Rblks int64 /* 512 bytes blocks read from all disks */
+ Time int64 /* amount of time disks are active */
+ Version int64 /* version number (1, 2, etc.,) */
+ Rserv int64 /* Average read or receive service time */
+ MinRserv int64 /* min read or receive service time */
+ MaxRserv int64 /* max read or receive service time */
+ RTimeOut int64 /* number of read request timeouts */
+ RFailed int64 /* number of failed read requests */
+ Wserv int64 /* Average write or send service time */
+ MinWserv int64 /* min write or send service time */
+ MaxWserv int64 /* max write or send service time */
+ WTimeOut int64 /* number of write request timeouts */
+ WFailed int64 /* number of failed write requests */
+ WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */
+ WqTime int64 /* accumulated wait queueing time */
+ WqMinTime int64 /* min wait queueing time */
+ WqMaxTime int64 /* max wait queueing time */
+}
+
+// Disk Adapter Types
+const (
+ DA_SCSI = 0 /* 0 ==> SCSI, SAS, other legacy adapter types */
+ DA_VSCSI /* 1 ==> Virtual SCSI/SAS Adapter */
+ DA_FCA /* 2 ==> Fiber Channel Adapter */
+)
+
+type DiskAdapter struct {
+ Name string /* name of the adapter (from ODM) */
+ Description string /* adapter description (from ODM) */
+ Number int32 /* number of disks connected to adapter */
+ Size int64 /* total size of all disks (in MB) */
+ Free int64 /* free portion of all disks (in MB) */
+ XRate int64 /* __rxfers: total number of reads via adapter */
+ Xfers int64 /* total number of transfers via adapter */
+ Rblks int64 /* 512 bytes blocks written via adapter */
+ Wblks int64 /* 512 bytes blocks read via adapter */
+ Time int64 /* amount of time disks are active */
+ Version int64 /* version number (1, 2, etc.,) */
+ AdapterType int64 /* 0 ==> SCSI, SAS, other legacy adapter types, 1 ==> Virtual SCSI/SAS Adapter, 2 ==> Fiber Channel Adapter */
+ DkBSize int64 /* Number of Bytes in a block for this disk*/
+ DkRxfers int64 /* Number of transfers from disk */
+ DkRserv int64 /* read or receive service time */
+ DkWserv int64 /* write or send service time */
+ MinRserv int64 /* Minimum read service time */
+ MaxRserv int64 /* Maximum read service time */
+ MinWserv int64 /* Minimum Write service time */
+ MaxWserv int64 /* Maximum write service time */
+ WqDepth int64 /* driver wait queue depth */
+ WqSampled int64 /* accumulated sampled dk_wq_depth */
+ WqTime int64 /* accumulated wait queueing time */
+ WqMinTime int64 /* minimum wait queueing time */
+ WqMaxTime int64 /* maximum wait queueing time */
+ QFull int64 /* "Service" queue full occurrence count (number of times the adapter/devices connected to the adapter is not accepting any more request) */
+ QSampled int64 /* accumulated sampled */
+}
+
+type Disk struct {
+ Name string /* name of the disk */
+ Description string /* disk description (from ODM) */
+ VGName string /* volume group name (from ODM) */
+ Size int64 /* size of the disk (in MB) */
+ Free int64 /* free portion of the disk (in MB) */
+ BSize int64 /* disk block size (in bytes) */
+ XRate int64 /* number of transfers from disk */
+ Xfers int64 /* number of transfers to/from disk */
+ Wblks int64 /* number of blocks written to disk */
+ Rblks int64 /* number of blocks read from disk */
+ QDepth int64 /* instantaneous "service" queue depth (number of requests sent to disk and not completed yet) */
+ Time int64 /* amount of time disk is active */
+ Adapter string /* disk adapter name */
+ PathsCount int32 /* number of paths to this disk */
+ QFull int64 /* "service" queue full occurrence count (number of times the disk is not accepting any more request) */
+ Rserv int64 /* read or receive service time */
+ RTimeOut int64 /* number of read request timeouts */
+ Rfailed int64 /* number of failed read requests */
+ MinRserv int64 /* min read or receive service time */
+ MaxRserv int64 /* max read or receive service time */
+ Wserv int64 /* write or send service time */
+ WTimeOut int64 /* number of write request timeouts */
+ Wfailed int64 /* number of failed write requests */
+ MinWserv int64 /* min write or send service time */
+ MaxWserv int64 /* max write or send service time */
+ WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */
+ WqSampled int64 /* accumulated sampled dk_wq_depth */
+ WqTime int64 /* accumulated wait queueing time */
+ WqMinTime int64 /* min wait queueing time */
+ WqMaxTime int64 /* max wait queueing time */
+ QSampled int64 /* accumulated sampled dk_q_depth */
+ Version int64 /* version number (1, 2, etc.,) */
+ PseudoDisk bool /*Indicates whether pseudo or physical disk */
+ VTDisk bool /* 1- Virtual Target Disk, 0 - Others */
+}
+
+type DiskPath struct {
+ Name string /* name of the path */
+ XRate int64 /* __rxfers: number of reads via the path */
+ Xfers int64 /* number of transfers via the path */
+ Rblks int64 /* 512 bytes blocks written via the path */
+ Wblks int64 /* 512 bytes blocks read via the path */
+ Time int64 /* amount of time disks are active */
+ Adapter string /* disk adapter name (from ODM) */
+ QFull int64 /* "service" queue full occurrence count (number of times the disk is not accepting any more request) */
+ Rserv int64 /* read or receive service time */
+ RTimeOut int64 /* number of read request timeouts */
+ Rfailed int64 /* number of failed read requests */
+ MinRserv int64 /* min read or receive service time */
+ MaxRserv int64 /* max read or receive service time */
+ Wserv int64 /* write or send service time */
+ WTimeOut int64 /* number of write request timeouts */
+ Wfailed int64 /* number of failed write requests */
+ MinWserv int64 /* min write or send service time */
+ MaxWserv int64 /* max write or send service time */
+ WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */
+ WqSampled int64 /* accumulated sampled dk_wq_depth */
+ WqTime int64 /* accumulated wait queueing time */
+ WqMinTime int64 /* min wait queueing time */
+ WqMaxTime int64 /* max wait queueing time */
+ QSampled int64 /* accumulated sampled dk_q_depth */
+ Version int64 /* version number (1, 2, etc.,) */
+}
+
+const (
+ FC_DOWN = 0 // FC Adapter state is DOWN
+ FC_UP = 1 // FC Adapter state is UP
+)
+
+const (
+ FCT_FCHBA = 0 // FC type - real Fiber Channel Adapter
+ FCT_VFC = 1 // FC type - virtual Fiber Channel
+)
+
+type FCAdapter struct {
+ Version int64 /* version number (1, 2, etc.,) */
+ Name string /* name of the adapter */
+ State int32 /* FC Adapter state UP or DOWN */
+ InputRequests int64 /* Number of Input Requests*/
+ OutputRequests int64 /* Number of Output Requests */
+ InputBytes int64 /* Number of Input Bytes */
+ OutputBytes int64 /* Number of Output Bytes */
+ EffMaxTransfer int64 /* Adapter's Effective Maximum Transfer Value */
+ NoDMAResourceCnt int64 /* Count of DMA failures due to no DMA Resource available */
+ NoCmdResourceCnt int64 /* Count of failures to allocate a command due to no command resource available */
+ AttentionType int32 /* Link up or down Indicator */
+ SecondsSinceLastReset int64 /* Displays the seconds since last reset of the statistics on the adapter */
+ TxFrames int64 /* Number of frames transmitted */
+ TxWords int64 /* Fiber Channel Kbytes transmitted */
+ RxFrames int64 /* Number of Frames Received */
+ RxWords int64 /* Fiber Channel Kbytes Received */
+ LIPCount int64 /* Count of LIP (Loop Initialization Protocol) Events received in case we have FC-AL */
+ NOSCount int64 /* Count of NOS (Not_Operational) Events. This indicates a link failure state. */
+ ErrorFrames int64 /* Number of frames received with the CRC Error */
+ DumpedFrames int64 /* Number of lost frames */
+ LinkFailureCount int64 /* Count of Link failures */
+ LossofSyncCount int64 /* Count of loss of sync */
+ LossofSignal int64 /* Count of loss of Signal */
+ PrimitiveSeqProtocolErrCount int64 /* number of times a primitive sequence was in error */
+ InvalidTxWordCount int64 /* Count of Invalid Transmission words received */
+ InvalidCRCCount int64 /* Count of CRC Errors in a Received Frame */
+ PortFcId int64 /* SCSI Id of the adapter */
+ PortSpeed int64 /* Speed of Adapter in GBIT */
+ PortType string /* Type of connection. The Possible Values are Fabric, Private Loop, Point-to-Point, unknown */
+ PortWWN int64 /* World Wide Port name */
+ PortSupportedSpeed int64 /* Supported Port Speed in GBIT */
+ AdapterType int /* 0 - Fiber Chanel, 1 - Virtual Fiber Chanel Adapter */
+ VfcName string /* name of the Virtual Fiber Chanel(VFC) adapter */
+ ClientPartName string /* name of the client partition */
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_fs.go b/test/integration/vendor/github.com/power-devops/perfstat/types_fs.go
new file mode 100644
index 000000000..b4b43ac61
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_fs.go
@@ -0,0 +1,195 @@
+package perfstat
+
+import (
+ "strings"
+)
+
+type FileSystem struct {
+ Device string /* name of the mounted device */
+ MountPoint string /* where the device is mounted */
+ FSType int /* File system type, see the constants below */
+ Flags uint /* Flags of the file system */
+ TotalBlocks int64 /* number of 512 bytes blocks in the filesystem */
+ FreeBlocks int64 /* number of free 512 bytes block in the filesystem */
+ TotalInodes int64 /* total number of inodes in the filesystem */
+ FreeInodes int64 /* number of free inodes in the filesystem */
+}
+
+func (f *FileSystem) TypeString() string {
+ switch f.FSType {
+ case FS_JFS2:
+ return "jfs2"
+ case FS_NAMEFS:
+ return "namefs"
+ case FS_NFS:
+ return "nfs"
+ case FS_JFS:
+ return "jfs"
+ case FS_CDROM:
+ return "cdrfs"
+ case FS_PROCFS:
+ return "procfs"
+ case FS_SFS:
+ return "sfs"
+ case FS_CACHEFS:
+ return "cachefs"
+ case FS_NFS3:
+ return "nfs3"
+ case FS_AUTOFS:
+ return "autofs"
+ case FS_POOLFS:
+ return "poolfs"
+ case FS_VXFS:
+ return "vxfs"
+ case FS_VXODM:
+ return "vxodm"
+ case FS_UDF:
+ return "udfs"
+ case FS_NFS4:
+ return "nfs4"
+ case FS_RFS4:
+ return "rfs4"
+ case FS_CIFS:
+ return "cifs"
+ case FS_PMEMFS:
+ return "pmemfs"
+ case FS_AHAFS:
+ return "ahafs"
+ case FS_STNFS:
+ return "stnfs"
+ case FS_ASMFS:
+ return "asmfs"
+ }
+ return "unknown"
+}
+
+func (f *FileSystem) FlagsString() string {
+ var flags []string
+
+ switch {
+ case f.Flags&VFS_READONLY != 0:
+ flags = append(flags, "ro")
+ case f.Flags&VFS_REMOVABLE != 0:
+ flags = append(flags, "removable")
+ case f.Flags&VFS_DEVMOUNT != 0:
+ flags = append(flags, "local")
+ case f.Flags&VFS_REMOTE != 0:
+ flags = append(flags, "remote")
+ case f.Flags&VFS_SYSV_MOUNT != 0:
+ flags = append(flags, "sysv")
+ case f.Flags&VFS_UNMOUNTING != 0:
+ flags = append(flags, "unmounting")
+ case f.Flags&VFS_NOSUID != 0:
+ flags = append(flags, "nosuid")
+ case f.Flags&VFS_NODEV != 0:
+ flags = append(flags, "nodev")
+ case f.Flags&VFS_NOINTEG != 0:
+ flags = append(flags, "nointeg")
+ case f.Flags&VFS_NOMANAGER != 0:
+ flags = append(flags, "nomanager")
+ case f.Flags&VFS_NOCASE != 0:
+ flags = append(flags, "nocase")
+ case f.Flags&VFS_UPCASE != 0:
+ flags = append(flags, "upcase")
+ case f.Flags&VFS_NBC != 0:
+ flags = append(flags, "nbc")
+ case f.Flags&VFS_MIND != 0:
+ flags = append(flags, "mind")
+ case f.Flags&VFS_RBR != 0:
+ flags = append(flags, "rbr")
+ case f.Flags&VFS_RBW != 0:
+ flags = append(flags, "rbw")
+ case f.Flags&VFS_DISCONNECTED != 0:
+ flags = append(flags, "disconnected")
+ case f.Flags&VFS_SHUTDOWN != 0:
+ flags = append(flags, "shutdown")
+ case f.Flags&VFS_VMOUNTOK != 0:
+ flags = append(flags, "vmountok")
+ case f.Flags&VFS_SUSER != 0:
+ flags = append(flags, "suser")
+ case f.Flags&VFS_SOFT_MOUNT != 0:
+ flags = append(flags, "soft")
+ case f.Flags&VFS_UNMOUNTED != 0:
+ flags = append(flags, "unmounted")
+ case f.Flags&VFS_DEADMOUNT != 0:
+ flags = append(flags, "deadmount")
+ case f.Flags&VFS_SNAPSHOT != 0:
+ flags = append(flags, "snapshot")
+ case f.Flags&VFS_VCM_ON != 0:
+ flags = append(flags, "vcm_on")
+ case f.Flags&VFS_VCM_MONITOR != 0:
+ flags = append(flags, "vcm_monitor")
+ case f.Flags&VFS_ATIMEOFF != 0:
+ flags = append(flags, "noatime")
+ case f.Flags&VFS_READMOSTLY != 0:
+ flags = append(flags, "readmostly")
+ case f.Flags&VFS_CIOR != 0:
+ flags = append(flags, "cior")
+ case f.Flags&VFS_CIO != 0:
+ flags = append(flags, "cio")
+ case f.Flags&VFS_DIO != 0:
+ flags = append(flags, "dio")
+ }
+
+ return strings.Join(flags, ",")
+}
+
+// Filesystem types
+const (
+ FS_JFS2 = 0 /* AIX physical fs "jfs2" */
+ FS_NAMEFS = 1 /* AIX pseudo fs "namefs" */
+ FS_NFS = 2 /* SUN Network File System "nfs" */
+ FS_JFS = 3 /* AIX R3 physical fs "jfs" */
+ FS_CDROM = 5 /* CDROM File System "cdrom" */
+ FS_PROCFS = 6 /* PROCFS File System "proc" */
+ FS_SFS = 16 /* AIX Special FS (STREAM mounts) */
+ FS_CACHEFS = 17 /* Cachefs file system */
+ FS_NFS3 = 18 /* NFSv3 file system */
+ FS_AUTOFS = 19 /* Automount file system */
+ FS_POOLFS = 20 /* Pool file system */
+ FS_VXFS = 32 /* THRPGIO File System "vxfs" */
+ FS_VXODM = 33 /* For Veritas File System */
+ FS_UDF = 34 /* UDFS file system */
+ FS_NFS4 = 35 /* NFSv4 file system */
+ FS_RFS4 = 36 /* NFSv4 Pseudo file system */
+ FS_CIFS = 37 /* AIX SMBFS (CIFS client) */
+ FS_PMEMFS = 38 /* MCR Async Mobility pseudo file system */
+ FS_AHAFS = 39 /* AHAFS File System "aha" */
+ FS_STNFS = 40 /* Short-Term NFS */
+ FS_ASMFS = 41 /* Oracle ASM FS */
+)
+
+// Filesystem flags
+const (
+ VFS_READONLY = 0x00000001 /* rdonly access to vfs */
+ VFS_REMOVABLE = 0x00000002 /* removable (diskette) media */
+ VFS_DEVMOUNT = 0x00000004 /* physical device mount */
+ VFS_REMOTE = 0x00000008 /* file system is on network */
+ VFS_SYSV_MOUNT = 0x00000010 /* System V style mount */
+ VFS_UNMOUNTING = 0x00000020 /* originated by unmount() */
+ VFS_NOSUID = 0x00000040 /* don't maintain suid-ness across this mount */
+ VFS_NODEV = 0x00000080 /* don't allow device access across this mount */
+ VFS_NOINTEG = 0x00000100 /* no integrity mount option */
+ VFS_NOMANAGER = 0x00000200 /* mount managed fs w/o manager */
+ VFS_NOCASE = 0x00000400 /* do not map dir names */
+ VFS_UPCASE = 0x00000800 /* map dir names to uppercase */
+ VFS_NBC = 0x00001000 /* NBC cached file in this vfs */
+ VFS_MIND = 0x00002000 /* multi-segment .indirect */
+ VFS_RBR = 0x00004000 /* Release-behind when reading */
+ VFS_RBW = 0x00008000 /* Release-behind when writing */
+ VFS_DISCONNECTED = 0x00010000 /* file mount not in use */
+ VFS_SHUTDOWN = 0x00020000 /* forced unmount for shutdown */
+ VFS_VMOUNTOK = 0x00040000 /* dir/file mnt permission flag */
+ VFS_SUSER = 0x00080000 /* client-side suser perm. flag */
+ VFS_SOFT_MOUNT = 0x00100000 /* file-over-file or directory over directory "soft" mount */
+ VFS_UNMOUNTED = 0x00200000 /* unmount completed, stale vnodes are left in the vfs */
+ VFS_DEADMOUNT = 0x00400000 /* softmount vfs should be disconnected at last vnode free */
+ VFS_SNAPSHOT = 0x00800000 /* snapshot mount */
+ VFS_VCM_ON = 0x01000000 /* VCM is currently active */
+ VFS_VCM_MONITOR = 0x02000000 /* VCM monitoring is active */
+ VFS_ATIMEOFF = 0x04000000 /* no atime updates during i/o */
+ VFS_READMOSTLY = 0x10000000 /* ROFS allows open for write */
+ VFS_CIOR = 0x20000000 /* O_CIOR mount */
+ VFS_CIO = 0x40000000 /* O_CIO mount */
+ VFS_DIO = 0x80000000 /* O_DIRECT mount */
+)
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_lpar.go b/test/integration/vendor/github.com/power-devops/perfstat/types_lpar.go
new file mode 100644
index 000000000..2d3c32fa8
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_lpar.go
@@ -0,0 +1,68 @@
+package perfstat
+
+type PartitionType struct {
+ SmtCapable bool /* OS supports SMT mode */
+ SmtEnabled bool /* SMT mode is on */
+ LparCapable bool /* OS supports logical partitioning */
+ LparEnabled bool /* logical partitioning is on */
+ SharedCapable bool /* OS supports shared processor LPAR */
+ SharedEnabled bool /* partition runs in shared mode */
+ DLparCapable bool /* OS supports dynamic LPAR */
+ Capped bool /* partition is capped */
+ Kernel64bit bool /* kernel is 64 bit */
+ PoolUtilAuthority bool /* pool utilization available */
+ DonateCapable bool /* capable of donating cycles */
+ DonateEnabled bool /* enabled for donating cycles */
+ AmsCapable bool /* 1 = AMS(Active Memory Sharing) capable, 0 = Not AMS capable */
+ AmsEnabled bool /* 1 = AMS(Active Memory Sharing) enabled, 0 = Not AMS enabled */
+ PowerSave bool /*1= Power saving mode is enabled*/
+ AmeEnabled bool /* Active Memory Expansion is enabled */
+ SharedExtended bool
+}
+
+type PartitionValue struct {
+ Online int64
+ Max int64
+ Min int64
+ Desired int64
+}
+
+type PartitionConfig struct {
+ Version int64 /* Version number */
+ Name string /* Partition Name */
+ Node string /* Node Name */
+ Conf PartitionType /* Partition Properties */
+ Number int32 /* Partition Number */
+ GroupID int32 /* Group ID */
+ ProcessorFamily string /* Processor Type */
+ ProcessorModel string /* Processor Model */
+ MachineID string /* Machine ID */
+ ProcessorMhz float64 /* Processor Clock Speed in MHz */
+ NumProcessors PartitionValue /* Number of Configured Physical Processors in frame*/
+ OSName string /* Name of Operating System */
+ OSVersion string /* Version of operating System */
+ OSBuild string /* Build of Operating System */
+ LCpus int32 /* Number of Logical CPUs */
+ SmtThreads int32 /* Number of SMT Threads */
+ Drives int32 /* Total Number of Drives */
+ NetworkAdapters int32 /* Total Number of Network Adapters */
+ CpuCap PartitionValue /* Min, Max and Online CPU Capacity */
+ Weightage int32 /* Variable Processor Capacity Weightage */
+ EntCapacity int32 /* number of processor units this partition is entitled to receive */
+ VCpus PartitionValue /* Min, Max and Online Virtual CPUs */
+ PoolID int32 /* Shared Pool ID of physical processors, to which this partition belongs*/
+ ActiveCpusInPool int32 /* Count of physical CPUs in the shared processor pool, to which this partition belongs */
+ PoolWeightage int32 /* Pool Weightage */
+ SharedPCpu int32 /* Number of physical processors allocated for shared processor use */
+ MaxPoolCap int32 /* Maximum processor capacity of partition's pool */
+ EntPoolCap int32 /* Entitled processor capacity of partition's pool */
+ Mem PartitionValue /* Min, Max and Online Memory */
+ MemWeightage int32 /* Variable Memory Capacity Weightage */
+ TotalIOMemoryEntitlement int64 /* I/O Memory Entitlement of the partition in bytes */
+ MemPoolID int32 /* AMS pool id of the pool the LPAR belongs to */
+ HyperPgSize int64 /* Hypervisor page size in KB*/
+ ExpMem PartitionValue /* Min, Max and Online Expanded Memory */
+ TargetMemExpFactor int64 /* Target Memory Expansion Factor scaled by 100 */
+ TargetMemExpSize int64 /* Expanded Memory Size in MB */
+ SubProcessorMode int32 /* Split core mode, its value can be 0,1,2 or 4. 0 for unsupported, 1 for capable but not enabled, 2 or 4 for enabled*/
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_lvm.go b/test/integration/vendor/github.com/power-devops/perfstat/types_lvm.go
new file mode 100644
index 000000000..8f7176a61
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_lvm.go
@@ -0,0 +1,31 @@
+package perfstat
+
+type LogicalVolume struct {
+ Name string /* logical volume name */
+ VGName string /* volume group name */
+ OpenClose int64 /* LVM_QLVOPEN, etc. (see lvm.h) */
+ State int64 /* LVM_UNDEF, etc. (see lvm.h) */
+ MirrorPolicy int64 /* LVM_PARALLEL, etc. (see lvm.h) */
+ MirrorWriteConsistency int64 /* LVM_CONSIST, etc. (see lvm.h) */
+ WriteVerify int64 /* LVM_VERIFY, etc. (see lvm.h) */
+ PPsize int64 /* physical partition size in MB */
+ LogicalPartitions int64 /* total number of logical paritions configured for this logical volume */
+ Mirrors int32 /* number of physical mirrors for each logical partition */
+ IOCnt int64 /* Number of read and write requests */
+ KBReads int64 /* Number of Kilobytes read */
+ KBWrites int64 /* Number of Kilobytes written */
+ Version int64 /* version number (1, 2, etc.,) */
+}
+
+type VolumeGroup struct {
+ Name string /* volume group name */
+ TotalDisks int64 /* number of physical volumes in the volume group */
+ ActiveDisks int64 /* number of active physical volumes in the volume group */
+ TotalLogicalVolumes int64 /* number of logical volumes in the volume group */
+ OpenedLogicalVolumes int64 /* number of logical volumes opened in the volume group */
+ IOCnt int64 /* Number of read and write requests */
+ KBReads int64 /* Number of Kilobytes read */
+ KBWrites int64 /* Number of Kilobytes written */
+ Version int64 /* version number (1, 2, etc.,) */
+ VariedState int /* Indicates volume group available or not */
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_memory.go b/test/integration/vendor/github.com/power-devops/perfstat/types_memory.go
new file mode 100644
index 000000000..096d29ad2
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_memory.go
@@ -0,0 +1,101 @@
+package perfstat
+
+type MemoryTotal struct {
+ VirtualTotal int64 /* total virtual memory (in 4KB pages) */
+ RealTotal int64 /* total real memory (in 4KB pages) */
+ RealFree int64 /* free real memory (in 4KB pages) */
+ RealPinned int64 /* real memory which is pinned (in 4KB pages) */
+ RealInUse int64 /* real memory which is in use (in 4KB pages) */
+ BadPages int64 /* number of bad pages */
+ PageFaults int64 /* number of page faults */
+ PageIn int64 /* number of pages paged in */
+ PageOut int64 /* number of pages paged out */
+ PgSpIn int64 /* number of page ins from paging space */
+ PgSpOut int64 /* number of page outs from paging space */
+ Scans int64 /* number of page scans by clock */
+ Cycles int64 /* number of page replacement cycles */
+ PgSteals int64 /* number of page steals */
+ NumPerm int64 /* number of frames used for files (in 4KB pages) */
+ PgSpTotal int64 /* total paging space (in 4KB pages) */
+ PgSpFree int64 /* free paging space (in 4KB pages) */
+ PgSpRsvd int64 /* reserved paging space (in 4KB pages) */
+ RealSystem int64 /* real memory used by system segments (in 4KB pages). */
+ RealUser int64 /* real memory used by non-system segments (in 4KB pages). */
+ RealProcess int64 /* real memory used by process segments (in 4KB pages). */
+ VirtualActive int64 /* Active virtual pages. Virtual pages are considered active if they have been accessed */
+ IOME int64 /* I/O memory entitlement of the partition in bytes*/
+ IOMU int64 /* I/O memory entitlement of the partition in use in bytes*/
+ IOHWM int64 /* High water mark of I/O memory entitlement used in bytes*/
+ PMem int64 /* Amount of physical mmeory currently backing partition's logical memory in bytes*/
+ CompressedTotal int64 /* Total numbers of pages in compressed pool (in 4KB pages) */
+ CompressedWSegPg int64 /* Number of compressed working storage pages */
+ CPgIn int64 /* number of page ins to compressed pool */
+ CPgOut int64 /* number of page outs from compressed pool */
+ TrueSize int64 /* True Memory Size in 4KB pages */
+ ExpandedMemory int64 /* Expanded Memory Size in 4KB pages */
+ CompressedWSegSize int64 /* Total size of the compressed working storage pages in the pool */
+ TargetCPoolSize int64 /* Target Compressed Pool Size in bytes */
+ MaxCPoolSize int64 /* Max Size of Compressed Pool in bytes */
+ MinUCPoolSize int64 /* Min Size of Uncompressed Pool in bytes */
+ CPoolSize int64 /* Compressed Pool size in bytes */
+ UCPoolSize int64 /* Uncompressed Pool size in bytes */
+ CPoolInUse int64 /* Compressed Pool Used in bytes */
+ UCPoolInUse int64 /* Uncompressed Pool Used in bytes */
+ Version int64 /* version number (1, 2, etc.,) */
+ RealAvailable int64 /* number of pages (in 4KB pages) of memory available without paging out working segments */
+ BytesCoalesced int64 /* The number of bytes of the calling partition.s logical real memory coalesced because they contained duplicated data */
+ BytesCoalescedMemPool int64 /* number of bytes of logical real memory coalesced because they contained duplicated data in the calling partition.s memory */
+}
+
+type MemoryPage struct {
+ PSize int64 /* page size in bytes */
+ RealTotal int64 /* number of real memory frames of this page size */
+ RealFree int64 /* number of pages on free list */
+ RealPinned int64 /* number of pages pinned */
+ RealInUse int64 /* number of pages in use */
+ PgExct int64 /* number of page faults */
+ PgIns int64 /* number of pages paged in */
+ PgOuts int64 /* number of pages paged out */
+ PgSpIns int64 /* number of page ins from paging space */
+ PgSpOuts int64 /* number of page outs from paging space */
+ Scans int64 /* number of page scans by clock */
+ Cycles int64 /* number of page replacement cycles */
+ PgSteals int64 /* number of page steals */
+ NumPerm int64 /* number of frames used for files */
+ NumPgSp int64 /* number of pages with allocated paging space */
+ RealSystem int64 /* number of pages used by system segments. */
+ RealUser int64 /* number of pages used by non-system segments. */
+ RealProcess int64 /* number of pages used by process segments. */
+ VirtActive int64 /* Active virtual pages. */
+ ComprsdTotal int64 /* Number of pages of this size compressed */
+ ComprsdWsegPgs int64 /* Number of compressed working storage pages */
+ CPgIns int64 /* number of page ins of this page size to compressed pool */
+ CPgOuts int64 /* number of page outs of this page size from compressed pool */
+ CPoolInUse int64 /* Compressed Size of this page size in Compressed Pool */
+ UCPoolSize int64 /* Uncompressed Pool size in bytes of this page size */
+ ComprsdWsegSize int64 /* Total size of the compressed working storage pages in the pool */
+ Version int64 /* version number (1, 2, etc.,) */
+ RealAvail int64 /* number of pages (in 4KB pages) of memory available without paging out working segments */
+}
+
+// paging space types
+const (
+ LV_PAGING = 1
+ NFS_PAGING = 2
+ UNKNOWN_PAGING = 3
+)
+
+type PagingSpace struct {
+ Name string /* Paging space name */
+ Type uint8 /* type of paging device (LV_PAGING or NFS_PAGING) */
+ VGName string /* volume group name */
+ Hostname string /* host name of paging server */
+ Filename string /* swap file name on server */
+ LPSize int64 /* size in number of logical partitions */
+ MBSize int64 /* size in megabytes */
+ MBUsed int64 /* portion used in megabytes */
+ IOPending int64 /* number of pending I/O */
+ Active uint8 /* indicates if active (1 if so, 0 if not) */
+ Automatic uint8 /* indicates if automatic (1 if so, 0 if not) */
+ Version int64 /* version number (1, 2, etc.,) */
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_network.go b/test/integration/vendor/github.com/power-devops/perfstat/types_network.go
new file mode 100644
index 000000000..e69d0041d
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_network.go
@@ -0,0 +1,163 @@
+package perfstat
+
+// Network Interface types
+const (
+ IFT_OTHER = 0x1
+ IFT_1822 = 0x2 /* old-style arpanet imp */
+ IFT_HDH1822 = 0x3 /* HDH arpanet imp */
+ IFT_X25DDN = 0x4 /* x25 to imp */
+ IFT_X25 = 0x5 /* PDN X25 interface (RFC877) */
+ IFT_ETHER = 0x6 /* Ethernet CSMACD */
+ IFT_ISO88023 = 0x7 /* CMSA CD */
+ IFT_ISO88024 = 0x8 /* Token Bus */
+ IFT_ISO88025 = 0x9 /* Token Ring */
+ IFT_ISO88026 = 0xa /* MAN */
+ IFT_STARLAN = 0xb
+ IFT_P10 = 0xc /* Proteon 10MBit ring */
+ IFT_P80 = 0xd /* Proteon 10MBit ring */
+ IFT_HY = 0xe /* Hyperchannel */
+ IFT_FDDI = 0xf
+ IFT_LAPB = 0x10
+ IFT_SDLC = 0x11
+ IFT_T1 = 0x12
+ IFT_CEPT = 0x13 /* E1 - european T1 */
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_PTPSERIAL = 0x16 /* Proprietary PTP serial */
+ IFT_PPP = 0x17 /* RFC 1331 */
+ IFT_LOOP = 0x18 /* loopback */
+ IFT_EON = 0x19 /* ISO over IP */
+ IFT_XETHER = 0x1a /* obsolete 3MB experimental ethernet */
+ IFT_NSIP = 0x1b /* XNS over IP */
+ IFT_SLIP = 0x1c /* IP over generic TTY */
+ IFT_ULTRA = 0x1d /* Ultra Technologies */
+ IFT_DS3 = 0x1e /* Generic T3 */
+ IFT_SIP = 0x1f /* SMDS */
+ IFT_FRELAY = 0x20 /* Frame Relay DTE only */
+ IFT_RS232 = 0x21
+ IFT_PARA = 0x22 /* parallel-port */
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ATM = 0x25 /* ATM cells */
+ IFT_MIOX25 = 0x26
+ IFT_SONET = 0x27 /* SONET or SDH */
+ IFT_X25PLE = 0x28
+ IFT_ISO88022LLC = 0x29
+ IFT_LOCALTALK = 0x2a
+ IFT_SMDSDXI = 0x2b
+ IFT_FRELAYDCE = 0x2c /* Frame Relay DCE */
+ IFT_V35 = 0x2d
+ IFT_HSSI = 0x2e
+ IFT_HIPPI = 0x2f
+ IFT_MODEM = 0x30 /* Generic Modem */
+ IFT_AAL5 = 0x31 /* AAL5 over ATM */
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SMDSICIP = 0x34 /* SMDS InterCarrier Interface */
+ IFT_PROPVIRTUAL = 0x35 /* Proprietary Virtual/internal */
+ IFT_PROPMUX = 0x36 /* Proprietary Multiplexing */
+ IFT_VIPA = 0x37 /* Virtual Interface */
+ IFT_SN = 0x38 /* Federation Switch */
+ IFT_SP = 0x39 /* SP switch */
+ IFT_FCS = 0x3a /* IP over Fiber Channel */
+ IFT_TUNNEL = 0x3b
+ IFT_GIFTUNNEL = 0x3c /* IPv4 over IPv6 tunnel */
+ IFT_HF = 0x3d /* Support for PERCS HFI*/
+ IFT_CLUSTER = 0x3e /* cluster pseudo network interface */
+ IFT_FB = 0xc7 /* IP over Infiniband. Number by IANA */
+)
+
+type NetIfaceTotal struct {
+ Number int32 /* number of network interfaces */
+ IPackets int64 /* number of packets received on interface */
+ IBytes int64 /* number of bytes received on interface */
+ IErrors int64 /* number of input errors on interface */
+ OPackets int64 /* number of packets sent on interface */
+ OBytes int64 /* number of bytes sent on interface */
+ OErrors int64 /* number of output errors on interface */
+ Collisions int64 /* number of collisions on csma interface */
+ XmitDrops int64 /* number of packets not transmitted */
+ Version int64 /* version number (1, 2, etc.,) */
+}
+
+type NetIface struct {
+ Name string /* name of the interface */
+ Description string /* interface description (from ODM, similar to lscfg output) */
+ Type uint8 /* ethernet, tokenring, etc. interpretation can be done using /usr/include/net/if_types.h */
+ MTU int64 /* network frame size */
+ IPackets int64 /* number of packets received on interface */
+ IBytes int64 /* number of bytes received on interface */
+ IErrors int64 /* number of input errors on interface */
+ OPackets int64 /* number of packets sent on interface */
+ OBytes int64 /* number of bytes sent on interface */
+ OErrors int64 /* number of output errors on interface */
+ Collisions int64 /* number of collisions on csma interface */
+ Bitrate int64 /* adapter rating in bit per second */
+ XmitDrops int64 /* number of packets not transmitted */
+ Version int64 /* version number (1, 2, etc.,) */
+ IfIqDrops int64 /* Dropped on input, this interface */
+ IfArpDrops int64 /* Dropped because no arp response */
+}
+
+type NetBuffer struct {
+ Name string /* size in ascii, always power of 2 (ex: "32", "64", "128") */
+ InUse int64 /* number of buffer currently allocated */
+ Calls int64 /* number of buffer allocations since last reset */
+ Delayed int64 /* number of delayed allocations */
+ Free int64 /* number of free calls */
+ Failed int64 /* number of failed allocations */
+ HighWatermark int64 /* high threshold for number of buffer allocated */
+ Freed int64 /* number of buffers freed */
+ Version int64 /* version number (1, 2, etc.,) */
+}
+
+// Network adapter types
+const (
+ NET_PHY = 0 /* physical device */
+ NET_SEA = 1 /* shared ethernet adapter */
+ NET_VIR = 2 /* virtual device */
+ NET_HEA = 3 /* host ethernet adapter */
+ NET_EC = 4 /* etherchannel */
+ NET_VLAN = 5 /* vlan pseudo device */
+)
+
+type NetAdapter struct {
+ Version int64 /* version number (1,2, etc) */
+ Name string /* name of the adapter */
+ TxPackets int64 /* Transmit Packets on interface */
+ TxBytes int64 /* Transmit Bytes on interface */
+ TxInterrupts int64 /* Transfer Interrupts */
+ TxErrors int64 /* Transmit Errors */
+ TxPacketsDropped int64 /* Packets Dropped at the time of Data Transmission */
+ TxQueueSize int64 /* Maximum Packets on Software Transmit Queue */
+ TxQueueLen int64 /* Transmission Queue Length */
+ TxQueueOverflow int64 /* Transmission Queue Overflow */
+ TxBroadcastPackets int64 /* Number of Broadcast Packets Transmitted */
+ TxMulticastPackets int64 /* Number of Multicast packets Transmitted */
+ TxCarrierSense int64 /* Lost Carrier Sense signal count */
+ TxDMAUnderrun int64 /* Count of DMA Under-runs for Transmission */
+ TxLostCTSErrors int64 /* The number of unsuccessful transmissions due to the loss of the Clear-to-Send signal error */
+ TxMaxCollisionErrors int64 /* Maximum Collision Errors at Transmission */
+ TxLateCollisionErrors int64 /* Late Collision Errors at Transmission */
+ TxDeferred int64 /* The number of packets deferred for Transmission. */
+ TxTimeoutErrors int64 /* Time Out Errors for Transmission */
+ TxSingleCollisionCount int64 /* Count of Single Collision error at Transmission */
+ TxMultipleCollisionCount int64 /* Count of Multiple Collision error at Transmission */
+ RxPackets int64 /* Receive Packets on interface */
+ RxBytes int64 /* Receive Bytes on interface */
+ RxInterrupts int64 /* Receive Interrupts */
+ RxErrors int64 /* Input errors on interface */
+ RxPacketsDropped int64 /* The number of packets accepted by the device driver for transmission which were not (for any reason) given to the device. */
+ RxBadPackets int64 /* Count of Bad Packets Received. */
+ RxMulticastPackets int64 /* Number of MultiCast Packets Received */
+ RxBroadcastPackets int64 /* Number of Broadcast Packets Received */
+ RxCRCErrors int64 /* Count of Packets Received with CRC errors */
+ RxDMAOverrun int64 /* Count of DMA over-runs for Data Receival. */
+ RxAlignmentErrors int64 /* Packets Received with Alignment Error */
+ RxNoResourceErrors int64 /* Packets Received with No Resource Errors */
+ RxCollisionErrors int64 /* Packets Received with Collision errors */
+ RxPacketTooShortErrors int64 /* Count of Short Packets Received. */
+ RxPacketTooLongErrors int64 /* Count of Too Long Packets Received. */
+ RxPacketDiscardedByAdapter int64 /* Count of Received Packets discarded by Adapter. */
+ AdapterType int32 /* 0 - Physical, 1 - SEA, 2 - Virtual, 3 -HEA */
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/types_process.go b/test/integration/vendor/github.com/power-devops/perfstat/types_process.go
new file mode 100644
index 000000000..325c70b07
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/types_process.go
@@ -0,0 +1,43 @@
+package perfstat
+
+type Process struct {
+ Version int64 /* version number (1, 2, etc.,) */
+ PID int64 /* Process ID */
+ ProcessName string /* Name of The Process */
+ Priority int32 /* Process Priority */
+ NumThreads int64 /* Thread Count */
+ UID int64 /* Owner Info */
+ ClassID int64 /* WLM Class Name */
+ Size int64 /* Virtual Size of the Process in KB(Exclusive Usage, Leaving all Shared Library Text & Shared File Pages, Shared Memory, Memory Mapped) */
+ RealMemData int64 /* Real Memory used for Data in KB */
+ RealMemText int64 /* Real Memory used for Text in KB */
+ VirtMemData int64 /* Virtual Memory used to Data in KB */
+ VirtMemText int64 /* Virtual Memory used for Text in KB */
+ SharedLibDataSize int64 /* Data Size from Shared Library in KB */
+ HeapSize int64 /* Heap Size in KB */
+ RealInUse int64 /* The Real memory in use(in KB) by the process including all kind of segments (excluding system segments). This includes Text, Data, Shared Library Text, Shared Library Data, File Pages, Shared Memory & Memory Mapped */
+ VirtInUse int64 /* The Virtual memory in use(in KB) by the process including all kind of segments (excluding system segments). This includes Text, Data, Shared Library Text, Shared Library Data, File Pages, Shared Memory & Memory Mapped */
+ Pinned int64 /* Pinned Memory(in KB) for this process inclusive of all segments */
+ PgSpInUse int64 /* Paging Space used(in KB) inclusive of all segments */
+ FilePages int64 /* File Pages used(in KB) including shared pages */
+ RealInUseMap int64 /* Real memory used(in KB) for Shared Memory and Memory Mapped regions */
+ VirtInUseMap int64 /* Virtual Memory used(in KB) for Shared Memory and Memory Mapped regions */
+ PinnedInUseMap int64 /* Pinned memory(in KB) for Shared Memory and Memory Mapped regions */
+ UCpuTime float64 /* User Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_process_util or perfstat_process respectively. */
+ SCpuTime float64 /* System Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_process_util or perfstat_process respectively. */
+ LastTimeBase int64 /* Timebase Counter */
+ InBytes int64 /* Bytes Read from Disk */
+ OutBytes int64 /* Bytes Written to Disk */
+ InOps int64 /* In Operations from Disk */
+ OutOps int64 /* Out Operations from Disk */
+}
+
+type Thread struct {
+ TID int64 /* thread identifier */
+ PID int64 /* process identifier */
+ CpuID int64 /* processor on which I'm bound */
+ UCpuTime float64 /* User Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_thread_util or perfstat_thread respectively. */
+ SCpuTime float64 /* System Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_thread_util or perfstat_thread respectively. */
+ LastTimeBase int64 /* Timebase Counter */
+ Version int64
+}
diff --git a/test/integration/vendor/github.com/power-devops/perfstat/uptime.go b/test/integration/vendor/github.com/power-devops/perfstat/uptime.go
new file mode 100644
index 000000000..2bd3e568d
--- /dev/null
+++ b/test/integration/vendor/github.com/power-devops/perfstat/uptime.go
@@ -0,0 +1,35 @@
+// +build aix
+
+package perfstat
+
+/*
+#include "c_helpers.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "time"
+)
+
+func timeSince(ts uint64) uint64 {
+ return uint64(time.Now().Unix()) - ts
+}
+
+// BootTime() returns the time of the last boot in UNIX seconds
+func BootTime() (uint64, error) {
+ sec := C.boottime()
+ if sec == -1 {
+ return 0, fmt.Errorf("Can't determine boot time")
+ }
+ return uint64(sec), nil
+}
+
+// UptimeSeconds() calculates uptime in seconds
+func UptimeSeconds() (uint64, error) {
+ boot, err := BootTime()
+ if err != nil {
+ return 0, err
+ }
+ return timeSince(boot), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/LICENSE b/test/integration/vendor/github.com/shirou/gopsutil/v3/LICENSE
new file mode 100644
index 000000000..6f06adcbf
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/LICENSE
@@ -0,0 +1,61 @@
+gopsutil is distributed under BSD license reproduced below.
+
+Copyright (c) 2014, WAKAYAMA Shirou
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the gopsutil authors nor the names of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-------
+internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go.
+
+
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go
new file mode 100644
index 000000000..83bc23d45
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go
@@ -0,0 +1,200 @@
+package cpu
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// TimesStat contains the amounts of time the CPU has spent performing different
+// kinds of work. Time units are in seconds. It is based on linux /proc/stat file.
+type TimesStat struct {
+ CPU string `json:"cpu"`
+ User float64 `json:"user"`
+ System float64 `json:"system"`
+ Idle float64 `json:"idle"`
+ Nice float64 `json:"nice"`
+ Iowait float64 `json:"iowait"`
+ Irq float64 `json:"irq"`
+ Softirq float64 `json:"softirq"`
+ Steal float64 `json:"steal"`
+ Guest float64 `json:"guest"`
+ GuestNice float64 `json:"guestNice"`
+}
+
+type InfoStat struct {
+ CPU int32 `json:"cpu"`
+ VendorID string `json:"vendorId"`
+ Family string `json:"family"`
+ Model string `json:"model"`
+ Stepping int32 `json:"stepping"`
+ PhysicalID string `json:"physicalId"`
+ CoreID string `json:"coreId"`
+ Cores int32 `json:"cores"`
+ ModelName string `json:"modelName"`
+ Mhz float64 `json:"mhz"`
+ CacheSize int32 `json:"cacheSize"`
+ Flags []string `json:"flags"`
+ Microcode string `json:"microcode"`
+}
+
+type lastPercent struct {
+ sync.Mutex
+ lastCPUTimes []TimesStat
+ lastPerCPUTimes []TimesStat
+}
+
+var (
+ lastCPUPercent lastPercent
+ invoke common.Invoker = common.Invoke{}
+)
+
+func init() {
+ lastCPUPercent.Lock()
+ lastCPUPercent.lastCPUTimes, _ = Times(false)
+ lastCPUPercent.lastPerCPUTimes, _ = Times(true)
+ lastCPUPercent.Unlock()
+}
+
+// Counts returns the number of physical or logical cores in the system
+func Counts(logical bool) (int, error) {
+ return CountsWithContext(context.Background(), logical)
+}
+
+func (c TimesStat) String() string {
+ v := []string{
+ `"cpu":"` + c.CPU + `"`,
+ `"user":` + strconv.FormatFloat(c.User, 'f', 1, 64),
+ `"system":` + strconv.FormatFloat(c.System, 'f', 1, 64),
+ `"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64),
+ `"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64),
+ `"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64),
+ `"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64),
+ `"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64),
+ `"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64),
+ `"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64),
+ `"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64),
+ }
+
+ return `{` + strings.Join(v, ",") + `}`
+}
+
+// Deprecated: Total returns the total number of seconds in a CPUTimesStat
+// Please do not use this internal function.
+func (c TimesStat) Total() float64 {
+ total := c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq +
+ c.Softirq + c.Steal + c.Guest + c.GuestNice
+
+ return total
+}
+
+func (c InfoStat) String() string {
+ s, _ := json.Marshal(c)
+ return string(s)
+}
+
+func getAllBusy(t TimesStat) (float64, float64) {
+ tot := t.Total()
+ if runtime.GOOS == "linux" {
+ tot -= t.Guest // Linux 2.6.24+
+ tot -= t.GuestNice // Linux 3.2.0+
+ }
+
+ busy := tot - t.Idle - t.Iowait
+
+ return tot, busy
+}
+
+func calculateBusy(t1, t2 TimesStat) float64 {
+ t1All, t1Busy := getAllBusy(t1)
+ t2All, t2Busy := getAllBusy(t2)
+
+ if t2Busy <= t1Busy {
+ return 0
+ }
+ if t2All <= t1All {
+ return 100
+ }
+ return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100))
+}
+
+func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) {
+ // Make sure the CPU measurements have the same length.
+ if len(t1) != len(t2) {
+ return nil, fmt.Errorf(
+ "received two CPU counts: %d != %d",
+ len(t1), len(t2),
+ )
+ }
+
+ ret := make([]float64, len(t1))
+ for i, t := range t2 {
+ ret[i] = calculateBusy(t1[i], t)
+ }
+ return ret, nil
+}
+
+// Percent calculates the percentage of cpu used either per CPU or combined.
+// If an interval of 0 is given it will compare the current cpu times against the last call.
+// Returns one value per cpu, or a single value if percpu is set to false.
+func Percent(interval time.Duration, percpu bool) ([]float64, error) {
+ return PercentWithContext(context.Background(), interval, percpu)
+}
+
+func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) {
+ if interval <= 0 {
+ return percentUsedFromLastCallWithContext(ctx, percpu)
+ }
+
+ // Get CPU usage at the start of the interval.
+ cpuTimes1, err := TimesWithContext(ctx, percpu)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := common.Sleep(ctx, interval); err != nil {
+ return nil, err
+ }
+
+ // And at the end of the interval.
+ cpuTimes2, err := TimesWithContext(ctx, percpu)
+ if err != nil {
+ return nil, err
+ }
+
+ return calculateAllBusy(cpuTimes1, cpuTimes2)
+}
+
+func percentUsedFromLastCall(percpu bool) ([]float64, error) {
+ return percentUsedFromLastCallWithContext(context.Background(), percpu)
+}
+
+func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]float64, error) {
+ cpuTimes, err := TimesWithContext(ctx, percpu)
+ if err != nil {
+ return nil, err
+ }
+ lastCPUPercent.Lock()
+ defer lastCPUPercent.Unlock()
+ var lastTimes []TimesStat
+ if percpu {
+ lastTimes = lastCPUPercent.lastPerCPUTimes
+ lastCPUPercent.lastPerCPUTimes = cpuTimes
+ } else {
+ lastTimes = lastCPUPercent.lastCPUTimes
+ lastCPUPercent.lastCPUTimes = cpuTimes
+ }
+
+ if lastTimes == nil {
+ return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil")
+ }
+ return calculateAllBusy(lastTimes, cpuTimes)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go
new file mode 100644
index 000000000..1439d1d79
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go
@@ -0,0 +1,16 @@
+//go:build aix
+// +build aix
+
+package cpu
+
+import (
+ "context"
+)
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go
new file mode 100644
index 000000000..9c1e70b17
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go
@@ -0,0 +1,66 @@
+//go:build aix && cgo
+// +build aix,cgo
+
+package cpu
+
+import (
+ "context"
+
+ "github.com/power-devops/perfstat"
+)
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ var ret []TimesStat
+ if percpu {
+ cpus, err := perfstat.CpuStat()
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range cpus {
+ ct := &TimesStat{
+ CPU: c.Name,
+ Idle: float64(c.Idle),
+ User: float64(c.User),
+ System: float64(c.Sys),
+ Iowait: float64(c.Wait),
+ }
+ ret = append(ret, *ct)
+ }
+ } else {
+ c, err := perfstat.CpuUtilTotalStat()
+ if err != nil {
+ return nil, err
+ }
+ ct := &TimesStat{
+ CPU: "cpu-total",
+ Idle: float64(c.IdlePct),
+ User: float64(c.UserPct),
+ System: float64(c.KernPct),
+ Iowait: float64(c.WaitPct),
+ }
+ ret = append(ret, *ct)
+ }
+ return ret, nil
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ c, err := perfstat.CpuTotalStat()
+ if err != nil {
+ return nil, err
+ }
+ info := InfoStat{
+ CPU: 0,
+ Mhz: float64(c.ProcessorHz / 1000000),
+ Cores: int32(c.NCpusCfg),
+ }
+ result := []InfoStat{info}
+ return result, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ c, err := perfstat.CpuTotalStat()
+ if err != nil {
+ return 0, err
+ }
+ return c.NCpusCfg, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go
new file mode 100644
index 000000000..d158000ea
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go
@@ -0,0 +1,95 @@
+//go:build aix && !cgo
+// +build aix,!cgo
+
+package cpu
+
+import (
+ "context"
+ "regexp"
+ "strings"
+ "strconv"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var whiteSpaces = regexp.MustCompile(`\s+`)
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ if percpu {
+ return []TimesStat{}, common.ErrNotImplementedError
+ } else {
+ out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1")
+ if err != nil {
+ return nil, err
+ }
+ lines := strings.Split(string(out), "\n")
+ if len(lines) < 5 {
+ return []TimesStat{}, common.ErrNotImplementedError
+ }
+
+ ret := TimesStat{CPU: "cpu-total"}
+ h := whiteSpaces.Split(lines[len(lines)-3], -1) // headers
+ v := whiteSpaces.Split(lines[len(lines)-2], -1) // values
+ for i, header := range h {
+ if t, err := strconv.ParseFloat(v[i], 64); err == nil {
+ switch header {
+ case `%usr`:
+ ret.User = t
+ case `%sys`:
+ ret.System = t
+ case `%wio`:
+ ret.Iowait = t
+ case `%idle`:
+ ret.Idle = t
+ }
+ }
+ }
+
+ return []TimesStat{ret}, nil
+ }
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ out, err := invoke.CommandWithContext(ctx, "prtconf")
+ if err != nil {
+ return nil, err
+ }
+
+ ret := InfoStat{}
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.HasPrefix(line, "Number Of Processors:") {
+ p := whiteSpaces.Split(line, 4)
+ if len(p) > 3 {
+ if t, err := strconv.ParseUint(p[3], 10, 64); err == nil {
+ ret.Cores = int32(t)
+ }
+ }
+ } else if strings.HasPrefix(line, "Processor Clock Speed:") {
+ p := whiteSpaces.Split(line, 5)
+ if len(p) > 4 {
+ if t, err := strconv.ParseFloat(p[3], 64); err == nil {
+ switch strings.ToUpper(p[4]) {
+ case "MHZ":
+ ret.Mhz = t
+ case "GHZ":
+ ret.Mhz = t * 1000.0
+ case "KHZ":
+ ret.Mhz = t / 1000.0
+ default:
+ ret.Mhz = t
+ }
+ }
+ }
+ break
+ }
+ }
+ return []InfoStat{ret}, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ info, err := InfoWithContext(ctx)
+ if err == nil {
+ return int(info[0].Cores), nil
+ }
+ return 0, err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go
new file mode 100644
index 000000000..7acb258d9
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go
@@ -0,0 +1,112 @@
+//go:build darwin
+// +build darwin
+
+package cpu
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+// sys/resource.h
+const (
+ CPUser = 0
+ cpNice = 1
+ cpSys = 2
+ cpIntr = 3
+ cpIdle = 4
+ cpUStates = 5
+)
+
+// default value. from time.h
+var ClocksPerSec = float64(128)
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ if percpu {
+ return perCPUTimes()
+ }
+
+ return allCPUTimes()
+}
+
+// Returns only one CPUInfoStat on FreeBSD
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ var ret []InfoStat
+
+ c := InfoStat{}
+ c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string")
+ family, _ := unix.SysctlUint32("machdep.cpu.family")
+ c.Family = strconv.FormatUint(uint64(family), 10)
+ model, _ := unix.SysctlUint32("machdep.cpu.model")
+ c.Model = strconv.FormatUint(uint64(model), 10)
+ stepping, _ := unix.SysctlUint32("machdep.cpu.stepping")
+ c.Stepping = int32(stepping)
+ features, err := unix.Sysctl("machdep.cpu.features")
+ if err == nil {
+ for _, v := range strings.Fields(features) {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ }
+ leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features")
+ if err == nil {
+ for _, v := range strings.Fields(leaf7Features) {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ }
+ extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures")
+ if err == nil {
+ for _, v := range strings.Fields(extfeatures) {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ }
+ cores, _ := unix.SysctlUint32("machdep.cpu.core_count")
+ c.Cores = int32(cores)
+ cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size")
+ c.CacheSize = int32(cacheSize)
+ c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor")
+
+ // Use the rated frequency of the CPU. This is a static value and does not
+ // account for low power or Turbo Boost modes.
+ cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency")
+ if err == nil {
+ c.Mhz = float64(cpuFrequency) / 1000000.0
+ }
+
+ return append(ret, c), nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ var cpuArgument string
+ if logical {
+ cpuArgument = "hw.logicalcpu"
+ } else {
+ cpuArgument = "hw.physicalcpu"
+ }
+
+ count, err := unix.SysctlUint32(cpuArgument)
+ if err != nil {
+ return 0, err
+ }
+
+ return int(count), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go
new file mode 100644
index 000000000..1d5f0772e
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go
@@ -0,0 +1,111 @@
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package cpu
+
+/*
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if TARGET_OS_MAC
+#include
+#endif
+#include
+#include
+*/
+import "C"
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "unsafe"
+)
+
+// these CPU times for darwin is borrowed from influxdb/telegraf.
+
+func perCPUTimes() ([]TimesStat, error) {
+ var (
+ count C.mach_msg_type_number_t
+ cpuload *C.processor_cpu_load_info_data_t
+ ncpu C.natural_t
+ )
+
+ status := C.host_processor_info(C.host_t(C.mach_host_self()),
+ C.PROCESSOR_CPU_LOAD_INFO,
+ &ncpu,
+ (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
+ &count)
+
+ if status != C.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_processor_info error=%d", status)
+ }
+
+ // jump through some cgo casting hoops and ensure we properly free
+ // the memory that cpuload points to
+ target := C.vm_map_t(C.mach_task_self_)
+ address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
+ defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))
+
+ // the body of struct processor_cpu_load_info
+ // aka processor_cpu_load_info_data_t
+ var cpu_ticks [C.CPU_STATE_MAX]uint32
+
+ // copy the cpuload array to a []byte buffer
+ // where we can binary.Read the data
+ size := int(ncpu) * binary.Size(cpu_ticks)
+ buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]
+
+ bbuf := bytes.NewBuffer(buf)
+
+ var ret []TimesStat
+
+ for i := 0; i < int(ncpu); i++ {
+ err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
+ if err != nil {
+ return nil, err
+ }
+
+ c := TimesStat{
+ CPU: fmt.Sprintf("cpu%d", i),
+ User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
+ System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
+ Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
+ Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
+ }
+
+ ret = append(ret, c)
+ }
+
+ return ret, nil
+}
+
+func allCPUTimes() ([]TimesStat, error) {
+ var count C.mach_msg_type_number_t
+ var cpuload C.host_cpu_load_info_data_t
+
+ count = C.HOST_CPU_LOAD_INFO_COUNT
+
+ status := C.host_statistics(C.host_t(C.mach_host_self()),
+ C.HOST_CPU_LOAD_INFO,
+ C.host_info_t(unsafe.Pointer(&cpuload)),
+ &count)
+
+ if status != C.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_statistics error=%d", status)
+ }
+
+ c := TimesStat{
+ CPU: "cpu-total",
+ User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
+ System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
+ Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
+ Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
+ }
+
+ return []TimesStat{c}, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go
new file mode 100644
index 000000000..e067e99f9
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go
@@ -0,0 +1,14 @@
+//go:build darwin && !cgo
+// +build darwin,!cgo
+
+package cpu
+
+import "github.com/shirou/gopsutil/v3/internal/common"
+
+func perCPUTimes() ([]TimesStat, error) {
+ return []TimesStat{}, common.ErrNotImplementedError
+}
+
+func allCPUTimes() ([]TimesStat, error) {
+ return []TimesStat{}, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go
new file mode 100644
index 000000000..fef53e5dc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go
@@ -0,0 +1,156 @@
+package cpu
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ClocksPerSec = float64(128)
+ cpuMatch = regexp.MustCompile(`^CPU:`)
+ originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`)
+ featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`)
+ featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`)
+ cpuEnd = regexp.MustCompile(`^Trying to mount root`)
+ cpuTimesSize int
+ emptyTimes cpuTimes
+)
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+func timeStat(name string, t *cpuTimes) *TimesStat {
+ return &TimesStat{
+ User: float64(t.User) / ClocksPerSec,
+ Nice: float64(t.Nice) / ClocksPerSec,
+ System: float64(t.Sys) / ClocksPerSec,
+ Idle: float64(t.Idle) / ClocksPerSec,
+ Irq: float64(t.Intr) / ClocksPerSec,
+ CPU: name,
+ }
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ if percpu {
+ buf, err := unix.SysctlRaw("kern.cp_times")
+ if err != nil {
+ return nil, err
+ }
+
+ // We can't do this in init due to the conflict with cpu.init()
+ if cpuTimesSize == 0 {
+ cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size())
+ }
+
+ ncpus := len(buf) / cpuTimesSize
+ ret := make([]TimesStat, 0, ncpus)
+ for i := 0; i < ncpus; i++ {
+ times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize]))
+ if *times == emptyTimes {
+ // CPU not present
+ continue
+ }
+ ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times))
+ }
+ return ret, nil
+ }
+
+ buf, err := unix.SysctlRaw("kern.cp_time")
+ if err != nil {
+ return nil, err
+ }
+
+ times := (*cpuTimes)(unsafe.Pointer(&buf[0]))
+ return []TimesStat{*timeStat("cpu-total", times)}, nil
+}
+
+// Returns only one InfoStat on DragonflyBSD. The information regarding core
+// count, however is accurate and it is assumed that all InfoStat attributes
+// are the same across CPUs.
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ const dmesgBoot = "/var/run/dmesg.boot"
+
+ c, err := parseDmesgBoot(dmesgBoot)
+ if err != nil {
+ return nil, err
+ }
+
+ var u32 uint32
+ if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil {
+ return nil, err
+ }
+ c.Mhz = float64(u32)
+
+ var num int
+ var buf string
+ if buf, err = unix.Sysctl("hw.cpu_topology.tree"); err != nil {
+ return nil, err
+ }
+ num = strings.Count(buf, "CHIP")
+ c.Cores = int32(strings.Count(string(buf), "CORE") / num)
+
+ if c.ModelName, err = unix.Sysctl("hw.model"); err != nil {
+ return nil, err
+ }
+
+ ret := make([]InfoStat, num)
+ for i := 0; i < num; i++ {
+ ret[i] = c
+ }
+
+ return ret, nil
+}
+
+func parseDmesgBoot(fileName string) (InfoStat, error) {
+ c := InfoStat{}
+ lines, _ := common.ReadLines(fileName)
+ for _, line := range lines {
+ if matches := cpuEnd.FindStringSubmatch(line); matches != nil {
+ break
+ } else if matches := originMatch.FindStringSubmatch(line); matches != nil {
+ c.VendorID = matches[1]
+ t, err := strconv.ParseInt(matches[2], 10, 32)
+ if err != nil {
+ return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %v", line, err)
+ }
+ c.Stepping = int32(t)
+ } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil {
+ for _, v := range strings.Split(matches[1], ",") {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil {
+ for _, v := range strings.Split(matches[1], ",") {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ }
+ }
+
+ return c, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go
new file mode 100644
index 000000000..57e14528d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go
@@ -0,0 +1,9 @@
+package cpu
+
+type cpuTimes struct {
+ User uint64
+ Nice uint64
+ Sys uint64
+ Intr uint64
+ Idle uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go
new file mode 100644
index 000000000..6d7007ff9
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go
@@ -0,0 +1,31 @@
+//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !dragonfly && !plan9 && !aix
+// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!dragonfly,!plan9,!aix
+
+package cpu
+
+import (
+ "context"
+ "runtime"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ return []TimesStat{}, common.ErrNotImplementedError
+}
+
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ return []InfoStat{}, common.ErrNotImplementedError
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go
new file mode 100644
index 000000000..d3f47353c
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go
@@ -0,0 +1,168 @@
+package cpu
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ClocksPerSec = float64(128)
+ cpuMatch = regexp.MustCompile(`^CPU:`)
+ originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Family\s*=\s*(.+)\s+Model\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`)
+ featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`)
+ featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`)
+ cpuEnd = regexp.MustCompile(`^Trying to mount root`)
+ cpuCores = regexp.MustCompile(`FreeBSD/SMP: (\d*) package\(s\) x (\d*) core\(s\)`)
+ cpuTimesSize int
+ emptyTimes cpuTimes
+)
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+func timeStat(name string, t *cpuTimes) *TimesStat {
+ return &TimesStat{
+ User: float64(t.User) / ClocksPerSec,
+ Nice: float64(t.Nice) / ClocksPerSec,
+ System: float64(t.Sys) / ClocksPerSec,
+ Idle: float64(t.Idle) / ClocksPerSec,
+ Irq: float64(t.Intr) / ClocksPerSec,
+ CPU: name,
+ }
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ if percpu {
+ buf, err := unix.SysctlRaw("kern.cp_times")
+ if err != nil {
+ return nil, err
+ }
+
+ // We can't do this in init due to the conflict with cpu.init()
+ if cpuTimesSize == 0 {
+ cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size())
+ }
+
+ ncpus := len(buf) / cpuTimesSize
+ ret := make([]TimesStat, 0, ncpus)
+ for i := 0; i < ncpus; i++ {
+ times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize]))
+ if *times == emptyTimes {
+ // CPU not present
+ continue
+ }
+ ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times))
+ }
+ return ret, nil
+ }
+
+ buf, err := unix.SysctlRaw("kern.cp_time")
+ if err != nil {
+ return nil, err
+ }
+
+ times := (*cpuTimes)(unsafe.Pointer(&buf[0]))
+ return []TimesStat{*timeStat("cpu-total", times)}, nil
+}
+
+// Returns only one InfoStat on FreeBSD. The information regarding core
+// count, however is accurate and it is assumed that all InfoStat attributes
+// are the same across CPUs.
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ const dmesgBoot = "/var/run/dmesg.boot"
+
+ c, num, err := parseDmesgBoot(dmesgBoot)
+ if err != nil {
+ return nil, err
+ }
+
+ var u32 uint32
+ if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil {
+ return nil, err
+ }
+ c.Mhz = float64(u32)
+
+ if u32, err = unix.SysctlUint32("hw.ncpu"); err != nil {
+ return nil, err
+ }
+ c.Cores = int32(u32)
+
+ if c.ModelName, err = unix.Sysctl("hw.model"); err != nil {
+ return nil, err
+ }
+
+ ret := make([]InfoStat, num)
+ for i := 0; i < num; i++ {
+ ret[i] = c
+ }
+
+ return ret, nil
+}
+
+func parseDmesgBoot(fileName string) (InfoStat, int, error) {
+ c := InfoStat{}
+ lines, _ := common.ReadLines(fileName)
+ cpuNum := 1 // default cpu num is 1
+ for _, line := range lines {
+ if matches := cpuEnd.FindStringSubmatch(line); matches != nil {
+ break
+ } else if matches := originMatch.FindStringSubmatch(line); matches != nil {
+ c.VendorID = matches[1]
+ c.Family = matches[3]
+ c.Model = matches[4]
+ t, err := strconv.ParseInt(matches[5], 10, 32)
+ if err != nil {
+ return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err)
+ }
+ c.Stepping = int32(t)
+ } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil {
+ for _, v := range strings.Split(matches[1], ",") {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil {
+ for _, v := range strings.Split(matches[1], ",") {
+ c.Flags = append(c.Flags, strings.ToLower(v))
+ }
+ } else if matches := cpuCores.FindStringSubmatch(line); matches != nil {
+ t, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err)
+ }
+ cpuNum = int(t)
+ t2, err := strconv.ParseInt(matches[2], 10, 32)
+ if err != nil {
+ return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err)
+ }
+ c.Cores = int32(t2)
+ }
+ }
+
+ return c, cpuNum, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go
new file mode 100644
index 000000000..8b7f4c321
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go
@@ -0,0 +1,9 @@
+package cpu
+
+type cpuTimes struct {
+ User uint32
+ Nice uint32
+ Sys uint32
+ Intr uint32
+ Idle uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go
new file mode 100644
index 000000000..57e14528d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go
@@ -0,0 +1,9 @@
+package cpu
+
+type cpuTimes struct {
+ User uint64
+ Nice uint64
+ Sys uint64
+ Intr uint64
+ Idle uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go
new file mode 100644
index 000000000..8b7f4c321
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go
@@ -0,0 +1,9 @@
+package cpu
+
+type cpuTimes struct {
+ User uint32
+ Nice uint32
+ Sys uint32
+ Intr uint32
+ Idle uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go
new file mode 100644
index 000000000..57e14528d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go
@@ -0,0 +1,9 @@
+package cpu
+
+type cpuTimes struct {
+ User uint64
+ Nice uint64
+ Sys uint64
+ Intr uint64
+ Idle uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go
new file mode 100644
index 000000000..4f26230d6
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go
@@ -0,0 +1,400 @@
+//go:build linux
+// +build linux
+
+package cpu
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/tklauser/go-sysconf"
+)
+
+var ClocksPerSec = float64(100)
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ filename := common.HostProc("stat")
+ lines := []string{}
+ if percpu {
+ statlines, err := common.ReadLines(filename)
+ if err != nil || len(statlines) < 2 {
+ return []TimesStat{}, nil
+ }
+ for _, line := range statlines[1:] {
+ if !strings.HasPrefix(line, "cpu") {
+ break
+ }
+ lines = append(lines, line)
+ }
+ } else {
+ lines, _ = common.ReadLinesOffsetN(filename, 0, 1)
+ }
+
+ ret := make([]TimesStat, 0, len(lines))
+
+ for _, line := range lines {
+ ct, err := parseStatLine(line)
+ if err != nil {
+ continue
+ }
+ ret = append(ret, *ct)
+
+ }
+ return ret, nil
+}
+
+func sysCPUPath(cpu int32, relPath string) string {
+ return common.HostSys(fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath)
+}
+
+func finishCPUInfo(c *InfoStat) {
+ var lines []string
+ var err error
+ var value float64
+
+ if len(c.CoreID) == 0 {
+ lines, err = common.ReadLines(sysCPUPath(c.CPU, "topology/core_id"))
+ if err == nil {
+ c.CoreID = lines[0]
+ }
+ }
+
+ // override the value of c.Mhz with cpufreq/cpuinfo_max_freq regardless
+ // of the value from /proc/cpuinfo because we want to report the maximum
+ // clock-speed of the CPU for c.Mhz, matching the behaviour of Windows
+ lines, err = common.ReadLines(sysCPUPath(c.CPU, "cpufreq/cpuinfo_max_freq"))
+ // if we encounter errors below such as there are no cpuinfo_max_freq file,
+ // we just ignore. so let Mhz is 0.
+ if err != nil || len(lines) == 0 {
+ return
+ }
+ value, err = strconv.ParseFloat(lines[0], 64)
+ if err != nil {
+ return
+ }
+ c.Mhz = value / 1000.0 // value is in kHz
+ if c.Mhz > 9999 {
+ c.Mhz = c.Mhz / 1000.0 // value in Hz
+ }
+}
+
+// CPUInfo on linux will return 1 item per physical thread.
+//
+// CPUs have three levels of counting: sockets, cores, threads.
+// Cores with HyperThreading count as having 2 threads per core.
+// Sockets often come with many physical CPU cores.
+// For example a single socket board with two cores each with HT will
+// return 4 CPUInfoStat structs on Linux and the "Cores" field set to 1.
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ filename := common.HostProc("cpuinfo")
+ lines, _ := common.ReadLines(filename)
+
+ var ret []InfoStat
+ var processorName string
+
+ c := InfoStat{CPU: -1, Cores: 1}
+ for _, line := range lines {
+ fields := strings.Split(line, ":")
+ if len(fields) < 2 {
+ continue
+ }
+ key := strings.TrimSpace(fields[0])
+ value := strings.TrimSpace(fields[1])
+
+ switch key {
+ case "Processor":
+ processorName = value
+ case "processor":
+ if c.CPU >= 0 {
+ finishCPUInfo(&c)
+ ret = append(ret, c)
+ }
+ c = InfoStat{Cores: 1, ModelName: processorName}
+ t, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return ret, err
+ }
+ c.CPU = int32(t)
+ case "vendorId", "vendor_id":
+ c.VendorID = value
+ case "CPU implementer":
+ if v, err := strconv.ParseUint(value, 0, 8); err == nil {
+ switch v {
+ case 0x41:
+ c.VendorID = "ARM"
+ case 0x42:
+ c.VendorID = "Broadcom"
+ case 0x43:
+ c.VendorID = "Cavium"
+ case 0x44:
+ c.VendorID = "DEC"
+ case 0x46:
+ c.VendorID = "Fujitsu"
+ case 0x48:
+ c.VendorID = "HiSilicon"
+ case 0x49:
+ c.VendorID = "Infineon"
+ case 0x4d:
+ c.VendorID = "Motorola/Freescale"
+ case 0x4e:
+ c.VendorID = "NVIDIA"
+ case 0x50:
+ c.VendorID = "APM"
+ case 0x51:
+ c.VendorID = "Qualcomm"
+ case 0x56:
+ c.VendorID = "Marvell"
+ case 0x61:
+ c.VendorID = "Apple"
+ case 0x69:
+ c.VendorID = "Intel"
+ case 0xc0:
+ c.VendorID = "Ampere"
+ }
+ }
+ case "cpu family":
+ c.Family = value
+ case "model", "CPU part":
+ c.Model = value
+ case "model name", "cpu":
+ c.ModelName = value
+ if strings.Contains(value, "POWER8") ||
+ strings.Contains(value, "POWER7") {
+ c.Model = strings.Split(value, " ")[0]
+ c.Family = "POWER"
+ c.VendorID = "IBM"
+ }
+ case "stepping", "revision", "CPU revision":
+ val := value
+
+ if key == "revision" {
+ val = strings.Split(value, ".")[0]
+ }
+
+ t, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return ret, err
+ }
+ c.Stepping = int32(t)
+ case "cpu MHz", "clock":
+ // treat this as the fallback value, thus we ignore error
+ if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil {
+ c.Mhz = t
+ }
+ case "cache size":
+ t, err := strconv.ParseInt(strings.Replace(value, " KB", "", 1), 10, 64)
+ if err != nil {
+ return ret, err
+ }
+ c.CacheSize = int32(t)
+ case "physical id":
+ c.PhysicalID = value
+ case "core id":
+ c.CoreID = value
+ case "flags", "Features":
+ c.Flags = strings.FieldsFunc(value, func(r rune) bool {
+ return r == ',' || r == ' '
+ })
+ case "microcode":
+ c.Microcode = value
+ }
+ }
+ if c.CPU >= 0 {
+ finishCPUInfo(&c)
+ ret = append(ret, c)
+ }
+ return ret, nil
+}
+
+func parseStatLine(line string) (*TimesStat, error) {
+ fields := strings.Fields(line)
+
+ if len(fields) == 0 {
+ return nil, errors.New("stat does not contain cpu info")
+ }
+
+ if !strings.HasPrefix(fields[0], "cpu") {
+ return nil, errors.New("not contain cpu")
+ }
+
+ cpu := fields[0]
+ if cpu == "cpu" {
+ cpu = "cpu-total"
+ }
+ user, err := strconv.ParseFloat(fields[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ nice, err := strconv.ParseFloat(fields[2], 64)
+ if err != nil {
+ return nil, err
+ }
+ system, err := strconv.ParseFloat(fields[3], 64)
+ if err != nil {
+ return nil, err
+ }
+ idle, err := strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, err
+ }
+ iowait, err := strconv.ParseFloat(fields[5], 64)
+ if err != nil {
+ return nil, err
+ }
+ irq, err := strconv.ParseFloat(fields[6], 64)
+ if err != nil {
+ return nil, err
+ }
+ softirq, err := strconv.ParseFloat(fields[7], 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ct := &TimesStat{
+ CPU: cpu,
+ User: user / ClocksPerSec,
+ Nice: nice / ClocksPerSec,
+ System: system / ClocksPerSec,
+ Idle: idle / ClocksPerSec,
+ Iowait: iowait / ClocksPerSec,
+ Irq: irq / ClocksPerSec,
+ Softirq: softirq / ClocksPerSec,
+ }
+ if len(fields) > 8 { // Linux >= 2.6.11
+ steal, err := strconv.ParseFloat(fields[8], 64)
+ if err != nil {
+ return nil, err
+ }
+ ct.Steal = steal / ClocksPerSec
+ }
+ if len(fields) > 9 { // Linux >= 2.6.24
+ guest, err := strconv.ParseFloat(fields[9], 64)
+ if err != nil {
+ return nil, err
+ }
+ ct.Guest = guest / ClocksPerSec
+ }
+ if len(fields) > 10 { // Linux >= 3.2.0
+ guestNice, err := strconv.ParseFloat(fields[10], 64)
+ if err != nil {
+ return nil, err
+ }
+ ct.GuestNice = guestNice / ClocksPerSec
+ }
+
+ return ct, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ if logical {
+ ret := 0
+ // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_pslinux.py#L599
+ procCpuinfo := common.HostProc("cpuinfo")
+ lines, err := common.ReadLines(procCpuinfo)
+ if err == nil {
+ for _, line := range lines {
+ line = strings.ToLower(line)
+ if strings.HasPrefix(line, "processor") {
+ _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]))
+ if err == nil {
+ ret++
+ }
+ }
+ }
+ }
+ if ret == 0 {
+ procStat := common.HostProc("stat")
+ lines, err = common.ReadLines(procStat)
+ if err != nil {
+ return 0, err
+ }
+ for _, line := range lines {
+ if len(line) >= 4 && strings.HasPrefix(line, "cpu") && '0' <= line[3] && line[3] <= '9' { // `^cpu\d` regexp matching
+ ret++
+ }
+ }
+ }
+ return ret, nil
+ }
+ // physical cores
+ // https://github.com/giampaolo/psutil/blob/8415355c8badc9c94418b19bdf26e622f06f0cce/psutil/_pslinux.py#L615-L628
+ threadSiblingsLists := make(map[string]bool)
+ // These 2 files are the same but */core_cpus_list is newer while */thread_siblings_list is deprecated and may disappear in the future.
+ // https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
+ // https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
+ // https://lkml.org/lkml/2019/2/26/41
+ for _, glob := range []string{"devices/system/cpu/cpu[0-9]*/topology/core_cpus_list", "devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"} {
+ if files, err := filepath.Glob(common.HostSys(glob)); err == nil {
+ for _, file := range files {
+ lines, err := common.ReadLines(file)
+ if err != nil || len(lines) != 1 {
+ continue
+ }
+ threadSiblingsLists[lines[0]] = true
+ }
+ ret := len(threadSiblingsLists)
+ if ret != 0 {
+ return ret, nil
+ }
+ }
+ }
+ // https://github.com/giampaolo/psutil/blob/122174a10b75c9beebe15f6c07dcf3afbe3b120d/psutil/_pslinux.py#L631-L652
+ filename := common.HostProc("cpuinfo")
+ lines, err := common.ReadLines(filename)
+ if err != nil {
+ return 0, err
+ }
+ mapping := make(map[int]int)
+ currentInfo := make(map[string]int)
+ for _, line := range lines {
+ line = strings.ToLower(strings.TrimSpace(line))
+ if line == "" {
+ // new section
+ id, okID := currentInfo["physical id"]
+ cores, okCores := currentInfo["cpu cores"]
+ if okID && okCores {
+ mapping[id] = cores
+ }
+ currentInfo = make(map[string]int)
+ continue
+ }
+ fields := strings.Split(line, ":")
+ if len(fields) < 2 {
+ continue
+ }
+ fields[0] = strings.TrimSpace(fields[0])
+ if fields[0] == "physical id" || fields[0] == "cpu cores" {
+ val, err := strconv.Atoi(strings.TrimSpace(fields[1]))
+ if err != nil {
+ continue
+ }
+ currentInfo[fields[0]] = val
+ }
+ }
+ ret := 0
+ for _, v := range mapping {
+ ret += v
+ }
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go
new file mode 100644
index 000000000..fe3329030
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go
@@ -0,0 +1,137 @@
+//go:build openbsd
+// +build openbsd
+
+package cpu
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // sys/sched.h
+ cpuOnline = 0x0001 // CPUSTATS_ONLINE
+
+ // sys/sysctl.h
+ ctlKern = 1 // "high kernel": proc, limits
+ ctlHw = 6 // CTL_HW
+ smt = 24 // HW_SMT
+ kernCpTime = 40 // KERN_CPTIME
+ kernCPUStats = 85 // KERN_CPUSTATS
+)
+
+var ClocksPerSec = float64(128)
+
+type cpuStats struct {
+ // cs_time[CPUSTATES]
+ User uint64
+ Nice uint64
+ Sys uint64
+ Spin uint64
+ Intr uint64
+ Idle uint64
+
+ // cs_flags
+ Flags uint64
+}
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) {
+ if !percpu {
+ mib := []int32{ctlKern, kernCpTime}
+ buf, _, err := common.CallSyscall(mib)
+ if err != nil {
+ return ret, err
+ }
+ times := (*cpuTimes)(unsafe.Pointer(&buf[0]))
+ stat := TimesStat{
+ CPU: "cpu-total",
+ User: float64(times.User) / ClocksPerSec,
+ Nice: float64(times.Nice) / ClocksPerSec,
+ System: float64(times.Sys) / ClocksPerSec,
+ Idle: float64(times.Idle) / ClocksPerSec,
+ Irq: float64(times.Intr) / ClocksPerSec,
+ }
+ return []TimesStat{stat}, nil
+ }
+
+ ncpu, err := unix.SysctlUint32("hw.ncpu")
+ if err != nil {
+ return
+ }
+
+ var i uint32
+ for i = 0; i < ncpu; i++ {
+ mib := []int32{ctlKern, kernCPUStats, int32(i)}
+ buf, _, err := common.CallSyscall(mib)
+ if err != nil {
+ return ret, err
+ }
+
+ stats := (*cpuStats)(unsafe.Pointer(&buf[0]))
+ if (stats.Flags & cpuOnline) == 0 {
+ continue
+ }
+ ret = append(ret, TimesStat{
+ CPU: fmt.Sprintf("cpu%d", i),
+ User: float64(stats.User) / ClocksPerSec,
+ Nice: float64(stats.Nice) / ClocksPerSec,
+ System: float64(stats.Sys) / ClocksPerSec,
+ Idle: float64(stats.Idle) / ClocksPerSec,
+ Irq: float64(stats.Intr) / ClocksPerSec,
+ })
+ }
+
+ return ret, nil
+}
+
+// Returns only one (minimal) CPUInfoStat on OpenBSD
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ var ret []InfoStat
+ var err error
+
+ c := InfoStat{}
+
+ mhz, err := unix.SysctlUint32("hw.cpuspeed")
+ if err != nil {
+ return nil, err
+ }
+ c.Mhz = float64(mhz)
+
+ ncpu, err := unix.SysctlUint32("hw.ncpuonline")
+ if err != nil {
+ return nil, err
+ }
+ c.Cores = int32(ncpu)
+
+ if c.ModelName, err = unix.Sysctl("hw.model"); err != nil {
+ return nil, err
+ }
+
+ return append(ret, c), nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go
new file mode 100644
index 000000000..5e878399a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go
@@ -0,0 +1,10 @@
+package cpu
+
+type cpuTimes struct {
+ User uint32
+ Nice uint32
+ Sys uint32
+ Spin uint32
+ Intr uint32
+ Idle uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go
new file mode 100644
index 000000000..d659058cd
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go
@@ -0,0 +1,10 @@
+package cpu
+
+type cpuTimes struct {
+ User uint64
+ Nice uint64
+ Sys uint64
+ Spin uint64
+ Intr uint64
+ Idle uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go
new file mode 100644
index 000000000..5e878399a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go
@@ -0,0 +1,10 @@
+package cpu
+
+type cpuTimes struct {
+ User uint32
+ Nice uint32
+ Sys uint32
+ Spin uint32
+ Intr uint32
+ Idle uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 000000000..d659058cd
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,10 @@
+package cpu
+
+type cpuTimes struct {
+ User uint64
+ Nice uint64
+ Sys uint64
+ Spin uint64
+ Intr uint64
+ Idle uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go
new file mode 100644
index 000000000..a2e99d8c0
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go
@@ -0,0 +1,50 @@
+//go:build plan9
+// +build plan9
+
+package cpu
+
+import (
+ "context"
+ "os"
+ "runtime"
+
+ stats "github.com/lufia/plan9stats"
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ // BUG: percpu flag is not supported yet.
+ root := os.Getenv("HOST_ROOT")
+ c, err := stats.ReadCPUType(ctx, stats.WithRootDir(root))
+ if err != nil {
+ return nil, err
+ }
+ s, err := stats.ReadCPUStats(ctx, stats.WithRootDir(root))
+ if err != nil {
+ return nil, err
+ }
+ return []TimesStat{
+ {
+ CPU: c.Name,
+ User: s.User.Seconds(),
+ System: s.Sys.Seconds(),
+ Idle: s.Idle.Seconds(),
+ },
+ }, nil
+}
+
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ return []InfoStat{}, common.ErrNotImplementedError
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go
new file mode 100644
index 000000000..f828c843e
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go
@@ -0,0 +1,268 @@
+package cpu
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/tklauser/go-sysconf"
+)
+
+var ClocksPerSec = float64(128)
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ ClocksPerSec = float64(clkTck)
+ }
+}
+
+// sum all values in a float64 map with float64 keys
+func msum(x map[float64]float64) float64 {
+ total := 0.0
+ for _, y := range x {
+ total += y
+ }
+ return total
+}
+
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/")
+ if err != nil {
+ return nil, fmt.Errorf("cannot execute kstat: %s", err)
+ }
+ cpu := make(map[float64]float64)
+ idle := make(map[float64]float64)
+ user := make(map[float64]float64)
+ kern := make(map[float64]float64)
+ iowt := make(map[float64]float64)
+ // swap := make(map[float64]float64)
+ re := regexp.MustCompile(`[:\s]+`)
+ for _, line := range strings.Split(string(kstatSysOut), "\n") {
+ fields := re.Split(line, -1)
+ if fields[0] != "cpu_stat" {
+ continue
+ }
+ cpuNumber, err := strconv.ParseFloat(fields[1], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse cpu number: %s", err)
+ }
+ cpu[cpuNumber] = cpuNumber
+ switch fields[3] {
+ case "idle":
+ idle[cpuNumber], err = strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse idle: %s", err)
+ }
+ case "user":
+ user[cpuNumber], err = strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse user: %s", err)
+ }
+ case "kernel":
+ kern[cpuNumber], err = strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse kernel: %s", err)
+ }
+ case "iowait":
+ iowt[cpuNumber], err = strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse iowait: %s", err)
+ }
+ //not sure how this translates, don't report, add to kernel, something else?
+ /*case "swap":
+ swap[cpuNumber], err = strconv.ParseFloat(fields[4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse swap: %s", err)
+ } */
+ }
+ }
+ ret := make([]TimesStat, 0, len(cpu))
+ if percpu {
+ for _, c := range cpu {
+ ct := &TimesStat{
+ CPU: fmt.Sprintf("cpu%d", int(cpu[c])),
+ Idle: idle[c] / ClocksPerSec,
+ User: user[c] / ClocksPerSec,
+ System: kern[c] / ClocksPerSec,
+ Iowait: iowt[c] / ClocksPerSec,
+ }
+ ret = append(ret, *ct)
+ }
+ } else {
+ ct := &TimesStat{
+ CPU: "cpu-total",
+ Idle: msum(idle) / ClocksPerSec,
+ User: msum(user) / ClocksPerSec,
+ System: msum(kern) / ClocksPerSec,
+ Iowait: msum(iowt) / ClocksPerSec,
+ }
+ ret = append(ret, *ct)
+ }
+ return ret, nil
+}
+
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ psrInfoOut, err := invoke.CommandWithContext(ctx, "psrinfo", "-p", "-v")
+ if err != nil {
+ return nil, fmt.Errorf("cannot execute psrinfo: %s", err)
+ }
+
+ procs, err := parseProcessorInfo(string(psrInfoOut))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing psrinfo output: %s", err)
+ }
+
+ isaInfoOut, err := invoke.CommandWithContext(ctx, "isainfo", "-b", "-v")
+ if err != nil {
+ return nil, fmt.Errorf("cannot execute isainfo: %s", err)
+ }
+
+ flags, err := parseISAInfo(string(isaInfoOut))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing isainfo output: %s", err)
+ }
+
+ result := make([]InfoStat, 0, len(flags))
+ for _, proc := range procs {
+ procWithFlags := proc
+ procWithFlags.Flags = flags
+ result = append(result, procWithFlags)
+ }
+
+ return result, nil
+}
+
+var flagsMatch = regexp.MustCompile(`[\w\.]+`)
+
+func parseISAInfo(cmdOutput string) ([]string, error) {
+ words := flagsMatch.FindAllString(cmdOutput, -1)
+
+ // Sanity check the output
+ if len(words) < 4 || words[1] != "bit" || words[3] != "applications" {
+ return nil, errors.New("attempted to parse invalid isainfo output")
+ }
+
+ flags := make([]string, len(words)-4)
+ for i, val := range words[4:] {
+ flags[i] = val
+ }
+ sort.Strings(flags)
+
+ return flags, nil
+}
+
+var psrInfoMatch = regexp.MustCompile(`The physical processor has (?:([\d]+) virtual processors? \(([\d-]+)\)|([\d]+) cores and ([\d]+) virtual processors[^\n]+)\n(?:\s+ The core has.+\n)*\s+.+ \((\w+) ([\S]+) family (.+) model (.+) step (.+) clock (.+) MHz\)\n[\s]*(.*)`)
+
+const (
+ psrNumCoresOffset = 1
+ psrNumCoresHTOffset = 3
+ psrNumHTOffset = 4
+ psrVendorIDOffset = 5
+ psrFamilyOffset = 7
+ psrModelOffset = 8
+ psrStepOffset = 9
+ psrClockOffset = 10
+ psrModelNameOffset = 11
+)
+
+func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) {
+ matches := psrInfoMatch.FindAllStringSubmatch(cmdOutput, -1)
+
+ var infoStatCount int32
+ result := make([]InfoStat, 0, len(matches))
+ for physicalIndex, physicalCPU := range matches {
+ var step int32
+ var clock float64
+
+ if physicalCPU[psrStepOffset] != "" {
+ stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err)
+ }
+ step = int32(stepParsed)
+ }
+
+ if physicalCPU[psrClockOffset] != "" {
+ clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err)
+ }
+ clock = float64(clockParsed)
+ }
+
+ var err error
+ var numCores int64
+ var numHT int64
+ switch {
+ case physicalCPU[psrNumCoresOffset] != "":
+ numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err)
+ }
+
+ for i := 0; i < int(numCores); i++ {
+ result = append(result, InfoStat{
+ CPU: infoStatCount,
+ PhysicalID: strconv.Itoa(physicalIndex),
+ CoreID: strconv.Itoa(i),
+ Cores: 1,
+ VendorID: physicalCPU[psrVendorIDOffset],
+ ModelName: physicalCPU[psrModelNameOffset],
+ Family: physicalCPU[psrFamilyOffset],
+ Model: physicalCPU[psrModelOffset],
+ Stepping: step,
+ Mhz: clock,
+ })
+ infoStatCount++
+ }
+ case physicalCPU[psrNumCoresHTOffset] != "":
+ numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err)
+ }
+
+ numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err)
+ }
+
+ for i := 0; i < int(numCores); i++ {
+ result = append(result, InfoStat{
+ CPU: infoStatCount,
+ PhysicalID: strconv.Itoa(physicalIndex),
+ CoreID: strconv.Itoa(i),
+ Cores: int32(numHT) / int32(numCores),
+ VendorID: physicalCPU[psrVendorIDOffset],
+ ModelName: physicalCPU[psrModelNameOffset],
+ Family: physicalCPU[psrFamilyOffset],
+ Model: physicalCPU[psrModelOffset],
+ Stepping: step,
+ Mhz: clock,
+ })
+ infoStatCount++
+ }
+ default:
+ return nil, errors.New("values for cores with and without hyperthreading are both set")
+ }
+ }
+ return result, nil
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ return runtime.NumCPU(), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go
new file mode 100644
index 000000000..d1a0e4cdb
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go
@@ -0,0 +1,233 @@
+//go:build windows
+// +build windows
+
+package cpu
+
+import (
+ "context"
+ "fmt"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/yusufpapurcu/wmi"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ procGetActiveProcessorCount = common.Modkernel32.NewProc("GetActiveProcessorCount")
+ procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo")
+)
+
+type win32_Processor struct {
+ Family uint16
+ Manufacturer string
+ Name string
+ NumberOfLogicalProcessors uint32
+ NumberOfCores uint32
+ ProcessorID *string
+ Stepping *string
+ MaxClockSpeed uint32
+}
+
+// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION
+// defined in windows api doc with the following
+// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information
+// additional fields documented here
+// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm
+type win32_SystemProcessorPerformanceInformation struct {
+ IdleTime int64 // idle time in 100ns (this is not a filetime).
+ KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime).
+ UserTime int64 // usertime in 100ns (this is not a filetime).
+ DpcTime int64 // dpc time in 100ns (this is not a filetime).
+ InterruptTime int64 // interrupt time in 100ns
+ InterruptCount uint32
+}
+
+const (
+ ClocksPerSec = 10000000.0
+
+ // systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation
+ // https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0
+ win32_SystemProcessorPerformanceInformationClass = 8
+
+ // size of systemProcessorPerformanceInfoSize in memory
+ win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{}))
+)
+
+// Times returns times stat per cpu and combined for all CPUs
+func Times(percpu bool) ([]TimesStat, error) {
+ return TimesWithContext(context.Background(), percpu)
+}
+
+func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ if percpu {
+ return perCPUTimes()
+ }
+
+ var ret []TimesStat
+ var lpIdleTime common.FILETIME
+ var lpKernelTime common.FILETIME
+ var lpUserTime common.FILETIME
+ r, _, _ := common.ProcGetSystemTimes.Call(
+ uintptr(unsafe.Pointer(&lpIdleTime)),
+ uintptr(unsafe.Pointer(&lpKernelTime)),
+ uintptr(unsafe.Pointer(&lpUserTime)))
+ if r == 0 {
+ return ret, windows.GetLastError()
+ }
+
+ LOT := float64(0.0000001)
+ HIT := (LOT * 4294967296.0)
+ idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime)))
+ user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime)))
+ kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime)))
+ system := (kernel - idle)
+
+ ret = append(ret, TimesStat{
+ CPU: "cpu-total",
+ Idle: float64(idle),
+ User: float64(user),
+ System: float64(system),
+ })
+ return ret, nil
+}
+
+func Info() ([]InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
+ var ret []InfoStat
+ var dst []win32_Processor
+ q := wmi.CreateQuery(&dst, "")
+ if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil {
+ return ret, err
+ }
+
+ var procID string
+ for i, l := range dst {
+ procID = ""
+ if l.ProcessorID != nil {
+ procID = *l.ProcessorID
+ }
+
+ cpu := InfoStat{
+ CPU: int32(i),
+ Family: fmt.Sprintf("%d", l.Family),
+ VendorID: l.Manufacturer,
+ ModelName: l.Name,
+ Cores: int32(l.NumberOfLogicalProcessors),
+ PhysicalID: procID,
+ Mhz: float64(l.MaxClockSpeed),
+ Flags: []string{},
+ }
+ ret = append(ret, cpu)
+ }
+
+ return ret, nil
+}
+
+// perCPUTimes returns times stat per cpu, per core and overall for all CPUs
+func perCPUTimes() ([]TimesStat, error) {
+ var ret []TimesStat
+ stats, err := perfInfo()
+ if err != nil {
+ return nil, err
+ }
+ for core, v := range stats {
+ c := TimesStat{
+ CPU: fmt.Sprintf("cpu%d", core),
+ User: float64(v.UserTime) / ClocksPerSec,
+ System: float64(v.KernelTime-v.IdleTime) / ClocksPerSec,
+ Idle: float64(v.IdleTime) / ClocksPerSec,
+ Irq: float64(v.InterruptTime) / ClocksPerSec,
+ }
+ ret = append(ret, c)
+ }
+ return ret, nil
+}
+
+// makes call to Windows API function to retrieve performance information for each core
+func perfInfo() ([]win32_SystemProcessorPerformanceInformation, error) {
+ // Make maxResults large for safety.
+ // We can't invoke the api call with a results array that's too small.
+ // If we have more than 2056 cores on a single host, then it's probably the future.
+ maxBuffer := 2056
+ // buffer for results from the windows proc
+ resultBuffer := make([]win32_SystemProcessorPerformanceInformation, maxBuffer)
+ // size of the buffer in memory
+ bufferSize := uintptr(win32_SystemProcessorPerformanceInfoSize) * uintptr(maxBuffer)
+ // size of the returned response
+ var retSize uint32
+
+ // Invoke windows api proc.
+ // The returned err from the windows dll proc will always be non-nil even when successful.
+ // See https://godoc.org/golang.org/x/sys/windows#LazyProc.Call for more information
+ retCode, _, err := common.ProcNtQuerySystemInformation.Call(
+ win32_SystemProcessorPerformanceInformationClass, // System Information Class -> SystemProcessorPerformanceInformation
+ uintptr(unsafe.Pointer(&resultBuffer[0])), // pointer to first element in result buffer
+ bufferSize, // size of the buffer in memory
+ uintptr(unsafe.Pointer(&retSize)), // pointer to the size of the returned results the windows proc will set this
+ )
+
+ // check return code for errors
+ if retCode != 0 {
+ return nil, fmt.Errorf("call to NtQuerySystemInformation returned %d. err: %s", retCode, err.Error())
+ }
+
+ // calculate the number of returned elements based on the returned size
+ numReturnedElements := retSize / win32_SystemProcessorPerformanceInfoSize
+
+ // trim results to the number of returned elements
+ resultBuffer = resultBuffer[:numReturnedElements]
+
+ return resultBuffer, nil
+}
+
+// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API.
+// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396
+// https://github.com/elastic/go-windows/blob/bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd/kernel32.go#L43
+type systemInfo struct {
+ wProcessorArchitecture uint16
+ wReserved uint16
+ dwPageSize uint32
+ lpMinimumApplicationAddress uintptr
+ lpMaximumApplicationAddress uintptr
+ dwActiveProcessorMask uintptr
+ dwNumberOfProcessors uint32
+ dwProcessorType uint32
+ dwAllocationGranularity uint32
+ wProcessorLevel uint16
+ wProcessorRevision uint16
+}
+
+func CountsWithContext(ctx context.Context, logical bool) (int, error) {
+ if logical {
+ // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97
+ err := procGetActiveProcessorCount.Find()
+ if err == nil { // Win7+
+ ret, _, _ := procGetActiveProcessorCount.Call(uintptr(0xffff)) // ALL_PROCESSOR_GROUPS is 0xffff according to Rust's winapi lib https://docs.rs/winapi/*/x86_64-pc-windows-msvc/src/winapi/shared/ntdef.rs.html#120
+ if ret != 0 {
+ return int(ret), nil
+ }
+ }
+ var systemInfo systemInfo
+ _, _, err = procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo)))
+ if systemInfo.dwNumberOfProcessors == 0 {
+ return 0, err
+ }
+ return int(systemInfo.dwNumberOfProcessors), nil
+ }
+ // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499
+ // for the time being, try with unreliable and slow WMI call…
+ var dst []win32_Processor
+ q := wmi.CreateQuery(&dst, "")
+ if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil {
+ return 0, err
+ }
+ var count uint32
+ for _, d := range dst {
+ count += d.NumberOfCores
+ }
+ return int(count), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk.go
new file mode 100644
index 000000000..dd4cc1d5f
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk.go
@@ -0,0 +1,96 @@
+package disk
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var invoke common.Invoker = common.Invoke{}
+
+type UsageStat struct {
+ Path string `json:"path"`
+ Fstype string `json:"fstype"`
+ Total uint64 `json:"total"`
+ Free uint64 `json:"free"`
+ Used uint64 `json:"used"`
+ UsedPercent float64 `json:"usedPercent"`
+ InodesTotal uint64 `json:"inodesTotal"`
+ InodesUsed uint64 `json:"inodesUsed"`
+ InodesFree uint64 `json:"inodesFree"`
+ InodesUsedPercent float64 `json:"inodesUsedPercent"`
+}
+
+type PartitionStat struct {
+ Device string `json:"device"`
+ Mountpoint string `json:"mountpoint"`
+ Fstype string `json:"fstype"`
+ Opts []string `json:"opts"`
+}
+
+type IOCountersStat struct {
+ ReadCount uint64 `json:"readCount"`
+ MergedReadCount uint64 `json:"mergedReadCount"`
+ WriteCount uint64 `json:"writeCount"`
+ MergedWriteCount uint64 `json:"mergedWriteCount"`
+ ReadBytes uint64 `json:"readBytes"`
+ WriteBytes uint64 `json:"writeBytes"`
+ ReadTime uint64 `json:"readTime"`
+ WriteTime uint64 `json:"writeTime"`
+ IopsInProgress uint64 `json:"iopsInProgress"`
+ IoTime uint64 `json:"ioTime"`
+ WeightedIO uint64 `json:"weightedIO"`
+ Name string `json:"name"`
+ SerialNumber string `json:"serialNumber"`
+ Label string `json:"label"`
+}
+
+func (d UsageStat) String() string {
+ s, _ := json.Marshal(d)
+ return string(s)
+}
+
+func (d PartitionStat) String() string {
+ s, _ := json.Marshal(d)
+ return string(s)
+}
+
+func (d IOCountersStat) String() string {
+ s, _ := json.Marshal(d)
+ return string(s)
+}
+
+// Usage returns a file system usage. path is a filesystem path such
+// as "/", not device file path like "/dev/vda1". If you want to use
+// a return value of disk.Partitions, use "Mountpoint" not "Device".
+func Usage(path string) (*UsageStat, error) {
+ return UsageWithContext(context.Background(), path)
+}
+
+// Partitions returns disk partitions. If all is false, returns
+// physical devices only (e.g. hard disks, cd-rom drives, USB keys)
+// and ignore all others (e.g. memory partitions such as /dev/shm)
+//
+// 'all' argument is ignored for BSD, see: https://github.com/giampaolo/psutil/issues/906
+func Partitions(all bool) ([]PartitionStat, error) {
+ return PartitionsWithContext(context.Background(), all)
+}
+
+func IOCounters(names ...string) (map[string]IOCountersStat, error) {
+ return IOCountersWithContext(context.Background(), names...)
+}
+
+// SerialNumber returns Serial Number of given device or empty string
+// on error. Name of device is expected, eg. /dev/sda
+func SerialNumber(name string) (string, error) {
+ return SerialNumberWithContext(context.Background(), name)
+}
+
+// Label returns label of given device or empty string on error.
+// Name of device is expected, eg. /dev/sda
+// Supports label based on devicemapper name
+// See https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-block-dm
+func Label(name string) (string, error) {
+ return LabelWithContext(context.Background(), name)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go
new file mode 100644
index 000000000..bc71712ea
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go
@@ -0,0 +1,22 @@
+//go:build aix
+// +build aix
+
+package disk
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go
new file mode 100644
index 000000000..aa534df30
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go
@@ -0,0 +1,76 @@
+//go:build aix && cgo
+// +build aix,cgo
+
+package disk
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/power-devops/perfstat"
+)
+
+var FSType map[int]string
+
+func init() {
+ FSType = map[int]string{
+ 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc",
+ 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs",
+ 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs",
+ 39: "ahafs", 40: "sterm-nfs", 41: "asmfs",
+ }
+}
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ f, err := perfstat.FileSystemStat()
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]PartitionStat, len(f))
+
+ for _, fs := range f {
+ fstyp, exists := FSType[fs.FSType]
+ if !exists {
+ fstyp = "unknown"
+ }
+ info := PartitionStat{
+ Device: fs.Device,
+ Mountpoint: fs.MountPoint,
+ Fstype: fstyp,
+ }
+ ret = append(ret, info)
+ }
+
+ return ret, err
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) {
+ f, err := perfstat.FileSystemStat()
+ if err != nil {
+ return nil, err
+ }
+
+ blocksize := uint64(512)
+ for _, fs := range f {
+ if path == fs.MountPoint {
+ fstyp, exists := FSType[fs.FSType]
+ if !exists {
+ fstyp = "unknown"
+ }
+ info := UsageStat{
+ Path: path,
+ Fstype: fstyp,
+ Total: uint64(fs.TotalBlocks) * blocksize,
+ Free: uint64(fs.FreeBlocks) * blocksize,
+ Used: uint64(fs.TotalBlocks-fs.FreeBlocks) * blocksize,
+ InodesTotal: uint64(fs.TotalInodes),
+ InodesFree: uint64(fs.FreeInodes),
+ InodesUsed: uint64(fs.TotalInodes - fs.FreeInodes),
+ }
+ info.UsedPercent = (float64(info.Used) / float64(info.Total)) * 100.0
+ info.InodesUsedPercent = (float64(info.InodesUsed) / float64(info.InodesTotal)) * 100.0
+ return &info, nil
+ }
+ }
+ return nil, fmt.Errorf("mountpoint %s not found", path)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go
new file mode 100644
index 000000000..eb25cbdae
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go
@@ -0,0 +1,18 @@
+//go:build aix && !cgo
+// +build aix,!cgo
+
+package disk
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ return []PartitionStat{}, common.ErrNotImplementedError
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) {
+ return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go
new file mode 100644
index 000000000..0877b7611
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go
@@ -0,0 +1,87 @@
+//go:build darwin
+// +build darwin
+
+package disk
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+// PartitionsWithContext returns disk partition.
+// 'all' argument is ignored, see: https://github.com/giampaolo/psutil/issues/906
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ var ret []PartitionStat
+
+ count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
+ if err != nil {
+ return ret, err
+ }
+ fs := make([]unix.Statfs_t, count)
+ if _, err = unix.Getfsstat(fs, unix.MNT_WAIT); err != nil {
+ return ret, err
+ }
+ for _, stat := range fs {
+ opts := []string{"rw"}
+ if stat.Flags&unix.MNT_RDONLY != 0 {
+ opts = []string{"ro"}
+ }
+ if stat.Flags&unix.MNT_SYNCHRONOUS != 0 {
+ opts = append(opts, "sync")
+ }
+ if stat.Flags&unix.MNT_NOEXEC != 0 {
+ opts = append(opts, "noexec")
+ }
+ if stat.Flags&unix.MNT_NOSUID != 0 {
+ opts = append(opts, "nosuid")
+ }
+ if stat.Flags&unix.MNT_UNION != 0 {
+ opts = append(opts, "union")
+ }
+ if stat.Flags&unix.MNT_ASYNC != 0 {
+ opts = append(opts, "async")
+ }
+ if stat.Flags&unix.MNT_DONTBROWSE != 0 {
+ opts = append(opts, "nobrowse")
+ }
+ if stat.Flags&unix.MNT_AUTOMOUNTED != 0 {
+ opts = append(opts, "automounted")
+ }
+ if stat.Flags&unix.MNT_JOURNALED != 0 {
+ opts = append(opts, "journaled")
+ }
+ if stat.Flags&unix.MNT_MULTILABEL != 0 {
+ opts = append(opts, "multilabel")
+ }
+ if stat.Flags&unix.MNT_NOATIME != 0 {
+ opts = append(opts, "noatime")
+ }
+ if stat.Flags&unix.MNT_NODEV != 0 {
+ opts = append(opts, "nodev")
+ }
+ d := PartitionStat{
+ Device: common.ByteToString(stat.Mntfromname[:]),
+ Mountpoint: common.ByteToString(stat.Mntonname[:]),
+ Fstype: common.ByteToString(stat.Fstypename[:]),
+ Opts: opts,
+ }
+
+ ret = append(ret, d)
+ }
+
+ return ret, nil
+}
+
+func getFsType(stat unix.Statfs_t) string {
+ return common.ByteToString(stat.Fstypename[:])
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go
new file mode 100644
index 000000000..b041c8d72
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go
@@ -0,0 +1,45 @@
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package disk
+
+/*
+#cgo LDFLAGS: -framework CoreFoundation -framework IOKit
+#include
+#include
+#include "iostat_darwin.h"
+*/
+import "C"
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ var buf [C.NDRIVE]C.DriveStats
+ n, err := C.gopsutil_v3_readdrivestat(&buf[0], C.int(len(buf)))
+ if err != nil {
+ return nil, err
+ }
+ ret := make(map[string]IOCountersStat, 0)
+ for i := 0; i < int(n); i++ {
+ d := IOCountersStat{
+ ReadBytes: uint64(buf[i].read),
+ WriteBytes: uint64(buf[i].written),
+ ReadCount: uint64(buf[i].nread),
+ WriteCount: uint64(buf[i].nwrite),
+ ReadTime: uint64(buf[i].readtime / 1000 / 1000), // note: read/write time are in ns, but we want ms.
+ WriteTime: uint64(buf[i].writetime / 1000 / 1000),
+ IoTime: uint64((buf[i].readtime + buf[i].writetime) / 1000 / 1000),
+ Name: C.GoString(&buf[i].name[0]),
+ }
+ if len(names) > 0 && !common.StringsHas(names, d.Name) {
+ continue
+ }
+
+ ret[d.Name] = d
+ }
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go
new file mode 100644
index 000000000..99bb8ba24
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go
@@ -0,0 +1,14 @@
+//go:build darwin && !cgo
+// +build darwin,!cgo
+
+package disk
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go
new file mode 100644
index 000000000..476873340
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go
@@ -0,0 +1,30 @@
+//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !aix
+// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!aix
+
+package disk
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ return []PartitionStat{}, common.ErrNotImplementedError
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go
new file mode 100644
index 000000000..753ce9ace
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go
@@ -0,0 +1,193 @@
+//go:build freebsd
+// +build freebsd
+
+package disk
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// PartitionsWithContext returns disk partition.
+// 'all' argument is ignored, see: https://github.com/giampaolo/psutil/issues/906
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ var ret []PartitionStat
+
+ // get length
+ count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
+ if err != nil {
+ return ret, err
+ }
+
+ fs := make([]unix.Statfs_t, count)
+ if _, err = unix.Getfsstat(fs, unix.MNT_WAIT); err != nil {
+ return ret, err
+ }
+
+ for _, stat := range fs {
+ opts := []string{"rw"}
+ if stat.Flags&unix.MNT_RDONLY != 0 {
+ opts = []string{"ro"}
+ }
+ if stat.Flags&unix.MNT_SYNCHRONOUS != 0 {
+ opts = append(opts, "sync")
+ }
+ if stat.Flags&unix.MNT_NOEXEC != 0 {
+ opts = append(opts, "noexec")
+ }
+ if stat.Flags&unix.MNT_NOSUID != 0 {
+ opts = append(opts, "nosuid")
+ }
+ if stat.Flags&unix.MNT_UNION != 0 {
+ opts = append(opts, "union")
+ }
+ if stat.Flags&unix.MNT_ASYNC != 0 {
+ opts = append(opts, "async")
+ }
+ if stat.Flags&unix.MNT_SUIDDIR != 0 {
+ opts = append(opts, "suiddir")
+ }
+ if stat.Flags&unix.MNT_SOFTDEP != 0 {
+ opts = append(opts, "softdep")
+ }
+ if stat.Flags&unix.MNT_NOSYMFOLLOW != 0 {
+ opts = append(opts, "nosymfollow")
+ }
+ if stat.Flags&unix.MNT_GJOURNAL != 0 {
+ opts = append(opts, "gjournal")
+ }
+ if stat.Flags&unix.MNT_MULTILABEL != 0 {
+ opts = append(opts, "multilabel")
+ }
+ if stat.Flags&unix.MNT_ACLS != 0 {
+ opts = append(opts, "acls")
+ }
+ if stat.Flags&unix.MNT_NOATIME != 0 {
+ opts = append(opts, "noatime")
+ }
+ if stat.Flags&unix.MNT_NOCLUSTERR != 0 {
+ opts = append(opts, "noclusterr")
+ }
+ if stat.Flags&unix.MNT_NOCLUSTERW != 0 {
+ opts = append(opts, "noclusterw")
+ }
+ if stat.Flags&unix.MNT_NFS4ACLS != 0 {
+ opts = append(opts, "nfsv4acls")
+ }
+
+ d := PartitionStat{
+ Device: common.ByteToString(stat.Mntfromname[:]),
+ Mountpoint: common.ByteToString(stat.Mntonname[:]),
+ Fstype: common.ByteToString(stat.Fstypename[:]),
+ Opts: opts,
+ }
+
+ ret = append(ret, d)
+ }
+
+ return ret, nil
+}
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ // statinfo->devinfo->devstat
+ // /usr/include/devinfo.h
+ ret := make(map[string]IOCountersStat)
+
+ r, err := unix.Sysctl("kern.devstat.all")
+ if err != nil {
+ return nil, err
+ }
+ buf := []byte(r)
+ length := len(buf)
+
+ count := int(uint64(length) / uint64(sizeOfdevstat))
+
+ buf = buf[8:] // devstat.all has version in the head.
+ // parse buf to devstat
+ for i := 0; i < count; i++ {
+ b := buf[i*sizeOfdevstat : i*sizeOfdevstat+sizeOfdevstat]
+ d, err := parsedevstat(b)
+ if err != nil {
+ continue
+ }
+ un := strconv.Itoa(int(d.Unit_number))
+ name := common.IntToString(d.Device_name[:]) + un
+
+ if len(names) > 0 && !common.StringsHas(names, name) {
+ continue
+ }
+
+ ds := IOCountersStat{
+ ReadCount: d.Operations[devstat_READ],
+ WriteCount: d.Operations[devstat_WRITE],
+ ReadBytes: d.Bytes[devstat_READ],
+ WriteBytes: d.Bytes[devstat_WRITE],
+ ReadTime: uint64(d.Duration[devstat_READ].Compute() * 1000),
+ WriteTime: uint64(d.Duration[devstat_WRITE].Compute() * 1000),
+ IoTime: uint64(d.Busy_time.Compute() * 1000),
+ Name: name,
+ }
+ ds.SerialNumber, _ = SerialNumberWithContext(ctx, name)
+ ret[name] = ds
+ }
+
+ return ret, nil
+}
+
+func (b bintime) Compute() float64 {
+ BINTIME_SCALE := 5.42101086242752217003726400434970855712890625e-20
+ return float64(b.Sec) + float64(b.Frac)*BINTIME_SCALE
+}
+
+// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE)
+
+func parsedevstat(buf []byte) (devstat, error) {
+ var ds devstat
+ br := bytes.NewReader(buf)
+ // err := binary.Read(br, binary.LittleEndian, &ds)
+ err := common.Read(br, binary.LittleEndian, &ds)
+ if err != nil {
+ return ds, err
+ }
+
+ return ds, nil
+}
+
+func getFsType(stat unix.Statfs_t) string {
+ return common.ByteToString(stat.Fstypename[:])
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
+ geomOut, err := invoke.CommandWithContext(ctx, "geom", "disk", "list", name)
+ if err != nil {
+ return "", fmt.Errorf("exec geom: %w", err)
+ }
+ s := bufio.NewScanner(bytes.NewReader(geomOut))
+ serial := ""
+ for s.Scan() {
+ flds := strings.Fields(s.Text())
+ if len(flds) == 2 && flds[0] == "ident:" {
+ if flds[1] != "(null)" {
+ serial = flds[1]
+ }
+ break
+ }
+ }
+ if err = s.Err(); err != nil {
+ return "", err
+ }
+ return serial, nil
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_386.go
new file mode 100644
index 000000000..7fa1783dc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_386.go
@@ -0,0 +1,63 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package disk
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeofLongDouble = 0x8
+
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfdevstat = 0xf0
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+ _C_long_double int64
+)
+
+type devstat struct {
+ Sequence0 uint32
+ Allocated int32
+ Start_count uint32
+ End_count uint32
+ Busy_from bintime
+ Dev_links _Ctype_struct___0
+ Device_number uint32
+ Device_name [16]int8
+ Unit_number int32
+ Bytes [4]uint64
+ Operations [4]uint64
+ Duration [4]bintime
+ Busy_time bintime
+ Creation_time bintime
+ Block_size uint32
+ Tag_types [3]uint64
+ Flags uint32
+ Device_type uint32
+ Priority uint32
+ Id *byte
+ Sequence1 uint32
+}
+
+type bintime struct {
+ Sec int32
+ Frac uint64
+}
+
+type _Ctype_struct___0 struct {
+ Empty uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_amd64.go
new file mode 100644
index 000000000..d86a308be
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_amd64.go
@@ -0,0 +1,66 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package disk
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeofLongDouble = 0x8
+
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfdevstat = 0x120
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+ _C_long_double int64
+)
+
+type devstat struct {
+ Sequence0 uint32
+ Allocated int32
+ Start_count uint32
+ End_count uint32
+ Busy_from bintime
+ Dev_links _Ctype_struct___0
+ Device_number uint32
+ Device_name [16]int8
+ Unit_number int32
+ Bytes [4]uint64
+ Operations [4]uint64
+ Duration [4]bintime
+ Busy_time bintime
+ Creation_time bintime
+ Block_size uint32
+ Pad_cgo_0 [4]byte
+ Tag_types [3]uint64
+ Flags uint32
+ Device_type uint32
+ Priority uint32
+ Pad_cgo_1 [4]byte
+ ID *byte
+ Sequence1 uint32
+ Pad_cgo_2 [4]byte
+}
+
+type bintime struct {
+ Sec int64
+ Frac uint64
+}
+
+type _Ctype_struct___0 struct {
+ Empty uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm.go
new file mode 100644
index 000000000..7fa1783dc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm.go
@@ -0,0 +1,63 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package disk
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeofLongDouble = 0x8
+
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfdevstat = 0xf0
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+ _C_long_double int64
+)
+
+type devstat struct {
+ Sequence0 uint32
+ Allocated int32
+ Start_count uint32
+ End_count uint32
+ Busy_from bintime
+ Dev_links _Ctype_struct___0
+ Device_number uint32
+ Device_name [16]int8
+ Unit_number int32
+ Bytes [4]uint64
+ Operations [4]uint64
+ Duration [4]bintime
+ Busy_time bintime
+ Creation_time bintime
+ Block_size uint32
+ Tag_types [3]uint64
+ Flags uint32
+ Device_type uint32
+ Priority uint32
+ Id *byte
+ Sequence1 uint32
+}
+
+type bintime struct {
+ Sec int32
+ Frac uint64
+}
+
+type _Ctype_struct___0 struct {
+ Empty uint32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm64.go
new file mode 100644
index 000000000..f6b3f80df
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd_arm64.go
@@ -0,0 +1,66 @@
+//go:build freebsd && arm64
+// +build freebsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs disk/types_freebsd.go
+
+package disk
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeofLongDouble = 0x8
+
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfdevstat = 0x120
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+ _C_long_double int64
+)
+
+type devstat struct {
+ Sequence0 uint32
+ Allocated int32
+ Start_count uint32
+ End_count uint32
+ Busy_from bintime
+ Dev_links _Ctype_struct___0
+ Device_number uint32
+ Device_name [16]int8
+ Unit_number int32
+ Bytes [4]uint64
+ Operations [4]uint64
+ Duration [4]bintime
+ Busy_time bintime
+ Creation_time bintime
+ Block_size uint32
+ Tag_types [3]uint64
+ Flags uint32
+ Device_type uint32
+ Priority uint32
+ Id *byte
+ Sequence1 uint32
+ Pad_cgo_0 [4]byte
+}
+type bintime struct {
+ Sec int64
+ Frac uint64
+}
+
+type _Ctype_struct___0 struct {
+ Empty uint64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go
new file mode 100644
index 000000000..3911af9c6
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go
@@ -0,0 +1,538 @@
+//go:build linux
+// +build linux
+
+package disk
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sectorSize = 512
+)
+
+const (
+ // man statfs
+ ADFS_SUPER_MAGIC = 0xadf5
+ AFFS_SUPER_MAGIC = 0xADFF
+ BDEVFS_MAGIC = 0x62646576
+ BEFS_SUPER_MAGIC = 0x42465331
+ BFS_MAGIC = 0x1BADFACE
+ BINFMTFS_MAGIC = 0x42494e4d
+ BTRFS_SUPER_MAGIC = 0x9123683E
+ CGROUP_SUPER_MAGIC = 0x27e0eb
+ CIFS_MAGIC_NUMBER = 0xFF534D42
+ CODA_SUPER_MAGIC = 0x73757245
+ COH_SUPER_MAGIC = 0x012FF7B7
+ CRAMFS_MAGIC = 0x28cd3d45
+ DEBUGFS_MAGIC = 0x64626720
+ DEVFS_SUPER_MAGIC = 0x1373
+ DEVPTS_SUPER_MAGIC = 0x1cd1
+ EFIVARFS_MAGIC = 0xde5e81e4
+ EFS_SUPER_MAGIC = 0x00414A53
+ EXT_SUPER_MAGIC = 0x137D
+ EXT2_OLD_SUPER_MAGIC = 0xEF51
+ EXT2_SUPER_MAGIC = 0xEF53
+ EXT3_SUPER_MAGIC = 0xEF53
+ EXT4_SUPER_MAGIC = 0xEF53
+ FUSE_SUPER_MAGIC = 0x65735546
+ FUTEXFS_SUPER_MAGIC = 0xBAD1DEA
+ HFS_SUPER_MAGIC = 0x4244
+ HFSPLUS_SUPER_MAGIC = 0x482b
+ HOSTFS_SUPER_MAGIC = 0x00c0ffee
+ HPFS_SUPER_MAGIC = 0xF995E849
+ HUGETLBFS_MAGIC = 0x958458f6
+ ISOFS_SUPER_MAGIC = 0x9660
+ JFFS2_SUPER_MAGIC = 0x72b6
+ JFS_SUPER_MAGIC = 0x3153464a
+ MINIX_SUPER_MAGIC = 0x137F /* orig. minix */
+ MINIX_SUPER_MAGIC2 = 0x138F /* 30 char minix */
+ MINIX2_SUPER_MAGIC = 0x2468 /* minix V2 */
+ MINIX2_SUPER_MAGIC2 = 0x2478 /* minix V2, 30 char names */
+ MINIX3_SUPER_MAGIC = 0x4d5a /* minix V3 fs, 60 char names */
+ MQUEUE_MAGIC = 0x19800202
+ MSDOS_SUPER_MAGIC = 0x4d44
+ NCP_SUPER_MAGIC = 0x564c
+ NFS_SUPER_MAGIC = 0x6969
+ NILFS_SUPER_MAGIC = 0x3434
+ NTFS_SB_MAGIC = 0x5346544e
+ OCFS2_SUPER_MAGIC = 0x7461636f
+ OPENPROM_SUPER_MAGIC = 0x9fa1
+ PIPEFS_MAGIC = 0x50495045
+ PROC_SUPER_MAGIC = 0x9fa0
+ PSTOREFS_MAGIC = 0x6165676C
+ QNX4_SUPER_MAGIC = 0x002f
+ QNX6_SUPER_MAGIC = 0x68191122
+ RAMFS_MAGIC = 0x858458f6
+ REISERFS_SUPER_MAGIC = 0x52654973
+ ROMFS_MAGIC = 0x7275
+ SELINUX_MAGIC = 0xf97cff8c
+ SMACK_MAGIC = 0x43415d53
+ SMB_SUPER_MAGIC = 0x517B
+ SOCKFS_MAGIC = 0x534F434B
+ SQUASHFS_MAGIC = 0x73717368
+ SYSFS_MAGIC = 0x62656572
+ SYSV2_SUPER_MAGIC = 0x012FF7B6
+ SYSV4_SUPER_MAGIC = 0x012FF7B5
+ TMPFS_MAGIC = 0x01021994
+ UDF_SUPER_MAGIC = 0x15013346
+ UFS_MAGIC = 0x00011954
+ USBDEVICE_SUPER_MAGIC = 0x9fa2
+ V9FS_MAGIC = 0x01021997
+ VXFS_SUPER_MAGIC = 0xa501FCF5
+ XENFS_SUPER_MAGIC = 0xabba1974
+ XENIX_SUPER_MAGIC = 0x012FF7B4
+ XFS_SUPER_MAGIC = 0x58465342
+ _XIAFS_SUPER_MAGIC = 0x012FD16D
+
+ AFS_SUPER_MAGIC = 0x5346414F
+ AUFS_SUPER_MAGIC = 0x61756673
+ ANON_INODE_FS_SUPER_MAGIC = 0x09041934
+ BPF_FS_MAGIC = 0xCAFE4A11
+ CEPH_SUPER_MAGIC = 0x00C36400
+ CGROUP2_SUPER_MAGIC = 0x63677270
+ CONFIGFS_MAGIC = 0x62656570
+ ECRYPTFS_SUPER_MAGIC = 0xF15F
+ F2FS_SUPER_MAGIC = 0xF2F52010
+ FAT_SUPER_MAGIC = 0x4006
+ FHGFS_SUPER_MAGIC = 0x19830326
+ FUSEBLK_SUPER_MAGIC = 0x65735546
+ FUSECTL_SUPER_MAGIC = 0x65735543
+ GFS_SUPER_MAGIC = 0x1161970
+ GPFS_SUPER_MAGIC = 0x47504653
+ MTD_INODE_FS_SUPER_MAGIC = 0x11307854
+ INOTIFYFS_SUPER_MAGIC = 0x2BAD1DEA
+ ISOFS_R_WIN_SUPER_MAGIC = 0x4004
+ ISOFS_WIN_SUPER_MAGIC = 0x4000
+ JFFS_SUPER_MAGIC = 0x07C0
+ KAFS_SUPER_MAGIC = 0x6B414653
+ LUSTRE_SUPER_MAGIC = 0x0BD00BD0
+ NFSD_SUPER_MAGIC = 0x6E667364
+ NSFS_MAGIC = 0x6E736673
+ PANFS_SUPER_MAGIC = 0xAAD7AAEA
+ RPC_PIPEFS_SUPER_MAGIC = 0x67596969
+ SECURITYFS_SUPER_MAGIC = 0x73636673
+ TRACEFS_MAGIC = 0x74726163
+ UFS_BYTESWAPPED_SUPER_MAGIC = 0x54190100
+ VMHGFS_SUPER_MAGIC = 0xBACBACBC
+ VZFS_SUPER_MAGIC = 0x565A4653
+ ZFS_SUPER_MAGIC = 0x2FC12FC1
+)
+
+// coreutils/src/stat.c
+var fsTypeMap = map[int64]string{
+ ADFS_SUPER_MAGIC: "adfs", /* 0xADF5 local */
+ AFFS_SUPER_MAGIC: "affs", /* 0xADFF local */
+ AFS_SUPER_MAGIC: "afs", /* 0x5346414F remote */
+ ANON_INODE_FS_SUPER_MAGIC: "anon-inode FS", /* 0x09041934 local */
+ AUFS_SUPER_MAGIC: "aufs", /* 0x61756673 remote */
+ // AUTOFS_SUPER_MAGIC: "autofs", /* 0x0187 local */
+ BEFS_SUPER_MAGIC: "befs", /* 0x42465331 local */
+ BDEVFS_MAGIC: "bdevfs", /* 0x62646576 local */
+ BFS_MAGIC: "bfs", /* 0x1BADFACE local */
+ BINFMTFS_MAGIC: "binfmt_misc", /* 0x42494E4D local */
+ BPF_FS_MAGIC: "bpf", /* 0xCAFE4A11 local */
+ BTRFS_SUPER_MAGIC: "btrfs", /* 0x9123683E local */
+ CEPH_SUPER_MAGIC: "ceph", /* 0x00C36400 remote */
+ CGROUP_SUPER_MAGIC: "cgroupfs", /* 0x0027E0EB local */
+ CGROUP2_SUPER_MAGIC: "cgroup2fs", /* 0x63677270 local */
+ CIFS_MAGIC_NUMBER: "cifs", /* 0xFF534D42 remote */
+ CODA_SUPER_MAGIC: "coda", /* 0x73757245 remote */
+ COH_SUPER_MAGIC: "coh", /* 0x012FF7B7 local */
+ CONFIGFS_MAGIC: "configfs", /* 0x62656570 local */
+ CRAMFS_MAGIC: "cramfs", /* 0x28CD3D45 local */
+ DEBUGFS_MAGIC: "debugfs", /* 0x64626720 local */
+ DEVFS_SUPER_MAGIC: "devfs", /* 0x1373 local */
+ DEVPTS_SUPER_MAGIC: "devpts", /* 0x1CD1 local */
+ ECRYPTFS_SUPER_MAGIC: "ecryptfs", /* 0xF15F local */
+ EFIVARFS_MAGIC: "efivarfs", /* 0xDE5E81E4 local */
+ EFS_SUPER_MAGIC: "efs", /* 0x00414A53 local */
+ EXT_SUPER_MAGIC: "ext", /* 0x137D local */
+ EXT2_SUPER_MAGIC: "ext2/ext3", /* 0xEF53 local */
+ EXT2_OLD_SUPER_MAGIC: "ext2", /* 0xEF51 local */
+ F2FS_SUPER_MAGIC: "f2fs", /* 0xF2F52010 local */
+ FAT_SUPER_MAGIC: "fat", /* 0x4006 local */
+ FHGFS_SUPER_MAGIC: "fhgfs", /* 0x19830326 remote */
+ FUSEBLK_SUPER_MAGIC: "fuseblk", /* 0x65735546 remote */
+ FUSECTL_SUPER_MAGIC: "fusectl", /* 0x65735543 remote */
+ FUTEXFS_SUPER_MAGIC: "futexfs", /* 0x0BAD1DEA local */
+ GFS_SUPER_MAGIC: "gfs/gfs2", /* 0x1161970 remote */
+ GPFS_SUPER_MAGIC: "gpfs", /* 0x47504653 remote */
+ HFS_SUPER_MAGIC: "hfs", /* 0x4244 local */
+ HFSPLUS_SUPER_MAGIC: "hfsplus", /* 0x482b local */
+ HPFS_SUPER_MAGIC: "hpfs", /* 0xF995E849 local */
+ HUGETLBFS_MAGIC: "hugetlbfs", /* 0x958458F6 local */
+ MTD_INODE_FS_SUPER_MAGIC: "inodefs", /* 0x11307854 local */
+ INOTIFYFS_SUPER_MAGIC: "inotifyfs", /* 0x2BAD1DEA local */
+ ISOFS_SUPER_MAGIC: "isofs", /* 0x9660 local */
+ ISOFS_R_WIN_SUPER_MAGIC: "isofs", /* 0x4004 local */
+ ISOFS_WIN_SUPER_MAGIC: "isofs", /* 0x4000 local */
+ JFFS_SUPER_MAGIC: "jffs", /* 0x07C0 local */
+ JFFS2_SUPER_MAGIC: "jffs2", /* 0x72B6 local */
+ JFS_SUPER_MAGIC: "jfs", /* 0x3153464A local */
+ KAFS_SUPER_MAGIC: "k-afs", /* 0x6B414653 remote */
+ LUSTRE_SUPER_MAGIC: "lustre", /* 0x0BD00BD0 remote */
+ MINIX_SUPER_MAGIC: "minix", /* 0x137F local */
+ MINIX_SUPER_MAGIC2: "minix (30 char.)", /* 0x138F local */
+ MINIX2_SUPER_MAGIC: "minix v2", /* 0x2468 local */
+ MINIX2_SUPER_MAGIC2: "minix v2 (30 char.)", /* 0x2478 local */
+ MINIX3_SUPER_MAGIC: "minix3", /* 0x4D5A local */
+ MQUEUE_MAGIC: "mqueue", /* 0x19800202 local */
+ MSDOS_SUPER_MAGIC: "msdos", /* 0x4D44 local */
+ NCP_SUPER_MAGIC: "novell", /* 0x564C remote */
+ NFS_SUPER_MAGIC: "nfs", /* 0x6969 remote */
+ NFSD_SUPER_MAGIC: "nfsd", /* 0x6E667364 remote */
+ NILFS_SUPER_MAGIC: "nilfs", /* 0x3434 local */
+ NSFS_MAGIC: "nsfs", /* 0x6E736673 local */
+ NTFS_SB_MAGIC: "ntfs", /* 0x5346544E local */
+ OPENPROM_SUPER_MAGIC: "openprom", /* 0x9FA1 local */
+ OCFS2_SUPER_MAGIC: "ocfs2", /* 0x7461636f remote */
+ PANFS_SUPER_MAGIC: "panfs", /* 0xAAD7AAEA remote */
+ PIPEFS_MAGIC: "pipefs", /* 0x50495045 remote */
+ PROC_SUPER_MAGIC: "proc", /* 0x9FA0 local */
+ PSTOREFS_MAGIC: "pstorefs", /* 0x6165676C local */
+ QNX4_SUPER_MAGIC: "qnx4", /* 0x002F local */
+ QNX6_SUPER_MAGIC: "qnx6", /* 0x68191122 local */
+ RAMFS_MAGIC: "ramfs", /* 0x858458F6 local */
+ REISERFS_SUPER_MAGIC: "reiserfs", /* 0x52654973 local */
+ ROMFS_MAGIC: "romfs", /* 0x7275 local */
+ RPC_PIPEFS_SUPER_MAGIC: "rpc_pipefs", /* 0x67596969 local */
+ SECURITYFS_SUPER_MAGIC: "securityfs", /* 0x73636673 local */
+ SELINUX_MAGIC: "selinux", /* 0xF97CFF8C local */
+ SMB_SUPER_MAGIC: "smb", /* 0x517B remote */
+ SOCKFS_MAGIC: "sockfs", /* 0x534F434B local */
+ SQUASHFS_MAGIC: "squashfs", /* 0x73717368 local */
+ SYSFS_MAGIC: "sysfs", /* 0x62656572 local */
+ SYSV2_SUPER_MAGIC: "sysv2", /* 0x012FF7B6 local */
+ SYSV4_SUPER_MAGIC: "sysv4", /* 0x012FF7B5 local */
+ TMPFS_MAGIC: "tmpfs", /* 0x01021994 local */
+ TRACEFS_MAGIC: "tracefs", /* 0x74726163 local */
+ UDF_SUPER_MAGIC: "udf", /* 0x15013346 local */
+ UFS_MAGIC: "ufs", /* 0x00011954 local */
+ UFS_BYTESWAPPED_SUPER_MAGIC: "ufs", /* 0x54190100 local */
+ USBDEVICE_SUPER_MAGIC: "usbdevfs", /* 0x9FA2 local */
+ V9FS_MAGIC: "v9fs", /* 0x01021997 local */
+ VMHGFS_SUPER_MAGIC: "vmhgfs", /* 0xBACBACBC remote */
+ VXFS_SUPER_MAGIC: "vxfs", /* 0xA501FCF5 local */
+ VZFS_SUPER_MAGIC: "vzfs", /* 0x565A4653 local */
+ XENFS_SUPER_MAGIC: "xenfs", /* 0xABBA1974 local */
+ XENIX_SUPER_MAGIC: "xenix", /* 0x012FF7B4 local */
+ XFS_SUPER_MAGIC: "xfs", /* 0x58465342 local */
+ _XIAFS_SUPER_MAGIC: "xia", /* 0x012FD16D local */
+ ZFS_SUPER_MAGIC: "zfs", /* 0x2FC12FC1 local */
+}
+
+// readMountFile reads mountinfo or mounts file under the specified root path
+// (eg, /proc/1, /proc/self, etc)
+func readMountFile(root string) (lines []string, useMounts bool, filename string, err error) {
+	filename = path.Join(root, "mountinfo")
+	lines, err = common.ReadLines(filename)
+	if err != nil {
+		var pathErr *os.PathError
+		if !errors.As(err, &pathErr) { // only a path error (e.g. missing file) triggers the fallback; other errors are returned as-is
+			return
+		}
+		// if kernel does not support 1/mountinfo, fallback to 1/mounts (<2.6.26)
+		useMounts = true
+		filename = path.Join(root, "mounts")
+		lines, err = common.ReadLines(filename)
+		if err != nil { // fallback failed too: report the mounts read error
+			return
+		}
+		return
+	}
+	return
+}
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { // lists mounted partitions from mountinfo/mounts; all==false filters to real disk filesystems
+	// by default, try "/proc/1/..." first
+	root := common.HostProc(path.Join("1"))
+
+	// force preference for dirname of HOST_PROC_MOUNTINFO, if set #1271
+	hpmPath := os.Getenv("HOST_PROC_MOUNTINFO")
+	if hpmPath != "" {
+		root = filepath.Dir(hpmPath)
+	}
+
+	lines, useMounts, filename, err := readMountFile(root)
+	if err != nil {
+		if hpmPath != "" { // don't fallback with HOST_PROC_MOUNTINFO
+			return nil, err
+		}
+		// fallback to "/proc/self/..." #1159
+		lines, useMounts, filename, err = readMountFile(common.HostProc(path.Join("self")))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	fs, err := getFileSystems() // supported fs list; only consulted for filtering when all == false
+	if err != nil && !all {
+		return nil, err
+	}
+
+	ret := make([]PartitionStat, 0, len(lines))
+
+	for _, line := range lines {
+		var d PartitionStat
+		if useMounts {
+			fields := strings.Fields(line)
+
+			d = PartitionStat{
+				Device: fields[0],
+				Mountpoint: unescapeFstab(fields[1]),
+				Fstype: fields[2],
+				Opts: strings.Fields(fields[3]),
+			}
+
+			if !all {
+				if d.Device == "none" || !common.StringsHas(fs, d.Fstype) { // skip pseudo devices and unsupported fs types
+					continue
+				}
+			}
+		} else {
+			// a line of 1/mountinfo has the following structure:
+			// 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+			// (1) (2) (3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+			// split the mountinfo line by the separator hyphen
+			parts := strings.Split(line, " - ")
+			if len(parts) != 2 {
+				return nil, fmt.Errorf("found invalid mountinfo line in file %s: %s ", filename, line)
+			}
+
+			fields := strings.Fields(parts[0])
+			blockDeviceID := fields[2] // major:minor of the backing block device
+			mountPoint := fields[4]
+			mountOpts := strings.Split(fields[5], ",")
+
+			if rootDir := fields[3]; rootDir != "" && rootDir != "/" { // a non-"/" root marks a bind mount
+				mountOpts = append(mountOpts, "bind")
+			}
+
+			fields = strings.Fields(parts[1])
+			fstype := fields[0]
+			device := fields[1]
+
+			d = PartitionStat{
+				Device: device,
+				Mountpoint: unescapeFstab(mountPoint),
+				Fstype: fstype,
+				Opts: mountOpts,
+			}
+
+			if !all {
+				if d.Device == "none" || !common.StringsHas(fs, d.Fstype) {
+					continue
+				}
+			}
+
+			if strings.HasPrefix(d.Device, "/dev/mapper/") {
+				devpath, err := filepath.EvalSymlinks(common.HostDev(strings.Replace(d.Device, "/dev", "", -1)))
+				if err == nil { // best-effort: keep the mapper name if the symlink cannot be resolved
+					d.Device = devpath
+				}
+			}
+
+			// /dev/root is not the real device name
+			// so we get the real device name from its major/minor number
+			if d.Device == "/dev/root" {
+				devpath, err := os.Readlink(common.HostSys("/dev/block/" + blockDeviceID))
+				if err == nil { // best-effort as above
+					d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1)
+				}
+			}
+		}
+		ret = append(ret, d)
+	}
+
+	return ret, nil
+}
+
+// getFileSystems returns supported filesystems from /proc/filesystems
+func getFileSystems() ([]string, error) {
+	filename := common.HostProc("filesystems")
+	lines, err := common.ReadLines(filename)
+	if err != nil {
+		return nil, err
+	}
+	var ret []string
+	for _, line := range lines {
+		if !strings.HasPrefix(line, "nodev") { // block-device-backed filesystems are kept as-is
+			ret = append(ret, strings.TrimSpace(line))
+			continue
+		}
+		t := strings.Split(line, "\t")
+		if len(t) != 2 || t[1] != "zfs" { // among "nodev" entries, only zfs is treated as a real filesystem
+			continue
+		}
+		ret = append(ret, strings.TrimSpace(t[1]))
+	}
+
+	return ret, nil
+}
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { // parses /proc/diskstats; columns per the kernel iostats layout
+	filename := common.HostProc("diskstats")
+	lines, err := common.ReadLines(filename)
+	if err != nil {
+		return nil, err
+	}
+	ret := make(map[string]IOCountersStat)
+	empty := IOCountersStat{} // used to drop devices whose counters are all zero
+
+	// use only basename such as "/dev/sda1" to "sda1"
+	for i, name := range names {
+		names[i] = filepath.Base(name)
+	}
+
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		if len(fields) < 14 {
+			// malformed line in /proc/diskstats, avoid panic by ignoring.
+			continue
+		}
+		name := fields[2] // device name column
+
+		if len(names) > 0 && !common.StringsHas(names, name) { // optional caller-supplied name filter
+			continue
+		}
+
+		reads, err := strconv.ParseUint((fields[3]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		mergedReads, err := strconv.ParseUint((fields[4]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		rbytes, err := strconv.ParseUint((fields[5]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		rtime, err := strconv.ParseUint((fields[6]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		writes, err := strconv.ParseUint((fields[7]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		mergedWrites, err := strconv.ParseUint((fields[8]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		wbytes, err := strconv.ParseUint((fields[9]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		wtime, err := strconv.ParseUint((fields[10]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		iopsInProgress, err := strconv.ParseUint((fields[11]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		iotime, err := strconv.ParseUint((fields[12]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		weightedIO, err := strconv.ParseUint((fields[13]), 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		d := IOCountersStat{
+			ReadBytes: rbytes * sectorSize, // sector counts scaled by the fixed 512-byte sectorSize
+			WriteBytes: wbytes * sectorSize,
+			ReadCount: reads,
+			WriteCount: writes,
+			MergedReadCount: mergedReads,
+			MergedWriteCount: mergedWrites,
+			ReadTime: rtime,
+			WriteTime: wtime,
+			IopsInProgress: iopsInProgress,
+			IoTime: iotime,
+			WeightedIO: weightedIO,
+		}
+		if d == empty { // skip devices that have never done any I/O
+			continue
+		}
+		d.Name = name
+
+		d.SerialNumber, _ = SerialNumberWithContext(ctx, name) // best-effort enrichment; errors deliberately ignored
+		d.Label, _ = LabelWithContext(ctx, name)
+
+		ret[name] = d
+	}
+	return ret, nil
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) { // looks up a device serial via udev data, then sysfs; "" with nil error means not found
+	var stat unix.Stat_t
+	err := unix.Stat(name, &stat)
+	if err != nil {
+		return "", err
+	}
+	major := unix.Major(uint64(stat.Rdev))
+	minor := unix.Minor(uint64(stat.Rdev))
+
+	// Try to get the serial from udev data
+	udevDataPath := common.HostRun(fmt.Sprintf("udev/data/b%d:%d", major, minor))
+	if udevdata, err := ioutil.ReadFile(udevDataPath); err == nil { // read failure just falls through to the sysfs path
+		scanner := bufio.NewScanner(bytes.NewReader(udevdata))
+		for scanner.Scan() {
+			values := strings.Split(scanner.Text(), "=")
+			if len(values) == 2 && values[0] == "E:ID_SERIAL" {
+				return values[1], nil
+			}
+		}
+	}
+
+	// Try to get the serial from sysfs, look at the disk device (minor 0) directly
+	// because if it is a partition it is not going to contain any device information
+	devicePath := common.HostSys(fmt.Sprintf("dev/block/%d:0/device", major))
+	model, _ := ioutil.ReadFile(filepath.Join(devicePath, "model")) // best-effort reads; missing files yield empty slices
+	serial, _ := ioutil.ReadFile(filepath.Join(devicePath, "serial"))
+	if len(model) > 0 && len(serial) > 0 {
+		return fmt.Sprintf("%s_%s", string(model), string(serial)), nil
+	}
+	return "", nil // not finding a serial is not an error
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) { // returns the devicemapper name as the label, or "" if the device has none
+	// Try label based on devicemapper name
+	dmname_filename := common.HostSys(fmt.Sprintf("block/%s/dm/name", name))
+
+	if !common.PathExists(dmname_filename) { // non-dm devices simply have no label
+		return "", nil
+	}
+
+	dmname, err := ioutil.ReadFile(dmname_filename)
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSpace(string(dmname)), nil
+}
+
+func getFsType(stat unix.Statfs_t) string { // maps a statfs magic number to a name via fsTypeMap; "" when unknown
+	t := int64(stat.Type) // Statfs_t.Type width varies by arch, so normalize to int64 for the map key
+	ret, ok := fsTypeMap[t]
+	if !ok {
+		return ""
+	}
+	return ret
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go
new file mode 100644
index 000000000..81ff23994
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go
@@ -0,0 +1,159 @@
+//go:build openbsd
+// +build openbsd
+
+package disk
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { // lists mounts via getfsstat(2); fixed: read-only mounts now report "ro" instead of "rw"
+	var ret []PartitionStat
+
+	// get length
+	count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
+	if err != nil {
+		return ret, err
+	}
+
+	fs := make([]unix.Statfs_t, count)
+	if _, err = unix.Getfsstat(fs, unix.MNT_WAIT); err != nil {
+		return ret, err
+	}
+
+	for _, stat := range fs {
+		opts := []string{"rw"}
+		if stat.F_flags&unix.MNT_RDONLY != 0 {
+			opts = []string{"ro"} // was "rw", making this branch a no-op; MNT_RDONLY means read-only
+		}
+		if stat.F_flags&unix.MNT_SYNCHRONOUS != 0 {
+			opts = append(opts, "sync")
+		}
+		if stat.F_flags&unix.MNT_NOEXEC != 0 {
+			opts = append(opts, "noexec")
+		}
+		if stat.F_flags&unix.MNT_NOSUID != 0 {
+			opts = append(opts, "nosuid")
+		}
+		if stat.F_flags&unix.MNT_NODEV != 0 {
+			opts = append(opts, "nodev")
+		}
+		if stat.F_flags&unix.MNT_ASYNC != 0 {
+			opts = append(opts, "async")
+		}
+		if stat.F_flags&unix.MNT_SOFTDEP != 0 {
+			opts = append(opts, "softdep")
+		}
+		if stat.F_flags&unix.MNT_NOATIME != 0 {
+			opts = append(opts, "noatime")
+		}
+		if stat.F_flags&unix.MNT_WXALLOWED != 0 {
+			opts = append(opts, "wxallowed")
+		}
+
+		d := PartitionStat{
+			Device: common.ByteToString(stat.F_mntfromname[:]),
+			Mountpoint: common.ByteToString(stat.F_mntonname[:]),
+			Fstype: common.ByteToString(stat.F_fstypename[:]),
+			Opts: opts,
+		}
+
+		ret = append(ret, d)
+	}
+
+	return ret, nil
+}
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { // decodes the hw.diskstats sysctl into per-device I/O counters
+	ret := make(map[string]IOCountersStat)
+
+	r, err := unix.SysctlRaw("hw.diskstats")
+	if err != nil {
+		return nil, err
+	}
+	buf := []byte(r)
+	length := len(buf)
+
+	count := int(uint64(length) / uint64(sizeOfDiskstats)) // number of whole Diskstats records in the buffer
+
+	// parse buf to Diskstats
+	for i := 0; i < count; i++ {
+		b := buf[i*sizeOfDiskstats : i*sizeOfDiskstats+sizeOfDiskstats]
+		d, err := parseDiskstats(b)
+		if err != nil { // skip records that fail to decode
+			continue
+		}
+		name := common.IntToString(d.Name[:])
+
+		if len(names) > 0 && !common.StringsHas(names, name) { // optional caller-supplied name filter
+			continue
+		}
+
+		ds := IOCountersStat{
+			ReadCount: d.Rxfer,
+			WriteCount: d.Wxfer,
+			ReadBytes: d.Rbytes,
+			WriteBytes: d.Wbytes,
+			Name: name,
+		}
+		ret[name] = ds
+	}
+
+	return ret, nil
+}
+
+// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE)
+
+func parseDiskstats(buf []byte) (Diskstats, error) { // decodes one fixed-size little-endian Diskstats record from buf
+	var ds Diskstats
+	br := bytes.NewReader(buf)
+	// err := binary.Read(br, binary.LittleEndian, &ds)
+	err := common.Read(br, binary.LittleEndian, &ds) // common.Read used instead of binary.Read (see commented line above)
+	if err != nil {
+		return ds, err
+	}
+
+	return ds, nil
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { // filesystem usage for path via statfs(2)
+	stat := unix.Statfs_t{}
+	err := unix.Statfs(path, &stat)
+	if err != nil {
+		return nil, err
+	}
+	bsize := stat.F_bsize
+
+	ret := &UsageStat{
+		Path: path,
+		Fstype: getFsType(stat),
+		Total: (uint64(stat.F_blocks) * uint64(bsize)),
+		Free: (uint64(stat.F_bavail) * uint64(bsize)),
+		InodesTotal: (uint64(stat.F_files)),
+		InodesFree: (uint64(stat.F_ffree)),
+	}
+
+	ret.InodesUsed = (ret.InodesTotal - ret.InodesFree)
+	ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 // NOTE(review): divides by zero when InodesTotal == 0; disk_unix.go guards this — confirm
+	ret.Used = (uint64(stat.F_blocks) - uint64(stat.F_bfree)) * uint64(bsize)
+	ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0 // NOTE(review): same concern when Total == 0
+
+	return ret, nil
+}
+
+func getFsType(stat unix.Statfs_t) string { // OpenBSD statfs carries the fs type name directly; no magic-number map needed
+	return common.ByteToString(stat.F_fstypename[:])
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) { // not implemented on OpenBSD
+	return "", common.ErrNotImplementedError
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) { // not implemented on OpenBSD
+	return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_386.go
new file mode 100644
index 000000000..f4c139f5e
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_386.go
@@ -0,0 +1,38 @@
+//go:build openbsd && 386
+// +build openbsd,386
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs disk/types_openbsd.go
+
+package disk
+
+const (
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfDiskstats = 0x60
+)
+
+type Diskstats struct {
+ Name [16]int8
+ Busy int32
+ Rxfer uint64
+ Wxfer uint64
+ Seek uint64
+ Rbytes uint64
+ Wbytes uint64
+ Attachtime Timeval
+ Timestamp Timeval
+ Time Timeval
+}
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
+
+type Diskstat struct{}
+type bintime struct{}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_amd64.go
new file mode 100644
index 000000000..c1bd52ef8
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_amd64.go
@@ -0,0 +1,36 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_openbsd.go
+
+package disk
+
+const (
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfDiskstats = 0x70
+)
+
+type Diskstats struct {
+ Name [16]int8
+ Busy int32
+ Pad_cgo_0 [4]byte
+ Rxfer uint64
+ Wxfer uint64
+ Seek uint64
+ Rbytes uint64
+ Wbytes uint64
+ Attachtime Timeval
+ Timestamp Timeval
+ Time Timeval
+}
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Diskstat struct{}
+type bintime struct{}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go
new file mode 100644
index 000000000..86054a626
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go
@@ -0,0 +1,38 @@
+//go:build openbsd && arm
+// +build openbsd,arm
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs disk/types_openbsd.go
+
+package disk
+
+const (
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfDiskstats = 0x60
+)
+
+type Diskstats struct {
+ Name [16]int8
+ Busy int32
+ Rxfer uint64
+ Wxfer uint64
+ Seek uint64
+ Rbytes uint64
+ Wbytes uint64
+ Attachtime Timeval
+ Timestamp Timeval
+ Time Timeval
+}
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
+
+type Diskstat struct{}
+type bintime struct{}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm64.go
new file mode 100644
index 000000000..ae1cf57e1
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm64.go
@@ -0,0 +1,38 @@
+//go:build openbsd && arm64
+// +build openbsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs disk/types_openbsd.go
+
+package disk
+
+const (
+ devstat_NO_DATA = 0x00
+ devstat_READ = 0x01
+ devstat_WRITE = 0x02
+ devstat_FREE = 0x03
+)
+
+const (
+ sizeOfDiskstats = 0x70
+)
+
+type Diskstats struct {
+ Name [16]int8
+ Busy int32
+ Rxfer uint64
+ Wxfer uint64
+ Seek uint64
+ Rbytes uint64
+ Wbytes uint64
+ Attachtime Timeval
+ Timestamp Timeval
+ Time Timeval
+}
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Diskstat struct{}
+type bintime struct{}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go
new file mode 100644
index 000000000..9c4a798d0
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go
@@ -0,0 +1,147 @@
+//go:build solaris
+// +build solaris
+
+package disk
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "math"
+ "os"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // _DEFAULT_NUM_MOUNTS is set to `cat /etc/mnttab | wc -l` rounded up to the
+ // nearest power of two.
+ _DEFAULT_NUM_MOUNTS = 32
+
+ // _MNTTAB default place to read mount information
+ _MNTTAB = "/etc/mnttab"
+)
+
+// A blacklist of read-only virtual filesystems. Writable filesystems are of
+// operational concern and must not be included in this list.
+var fsTypeBlacklist = map[string]struct{}{
+ "ctfs": {},
+ "dev": {},
+ "fd": {},
+ "lofs": {},
+ "lxproc": {},
+ "mntfs": {},
+ "objfs": {},
+ "proc": {},
+}
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { // lists mounts by scanning mnttab(4); fixed: an Open failure is now returned instead of silently scanning a nil file
+	ret := make([]PartitionStat, 0, _DEFAULT_NUM_MOUNTS)
+
+	// Scan mnttab(4)
+	f, err := os.Open(_MNTTAB)
+	if err != nil {
+		return nil, fmt.Errorf("unable to open %q: %w", _MNTTAB, err) // was an empty if-body that dropped the error
+	}
+	defer func() {
+		if err == nil {
+			err = f.Close()
+		} else {
+			f.Close()
+		}
+	}()
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		fields := strings.Split(scanner.Text(), "\t")
+
+		if _, found := fsTypeBlacklist[fields[2]]; found {
+			continue
+		}
+
+		ret = append(ret, PartitionStat{
+			// NOTE(seanc@): Device isn't exactly accurate: from mnttab(4): "The name
+			// of the resource that has been mounted." Ideally this value would come
+			// from Statvfs_t.Fsid but I'm leaving it to the caller to traverse
+			// unix.Statvfs().
+			Device: fields[0],
+			Mountpoint: fields[1],
+			Fstype: fields[2],
+			Opts: strings.Split(fields[3], ","),
+		})
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err)
+	}
+
+	return ret, err
+}
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { // not implemented on Solaris
+	return nil, common.ErrNotImplementedError
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { // filesystem usage for path via statvfs(2)
+	statvfs := unix.Statvfs_t{}
+	if err := unix.Statvfs(path, &statvfs); err != nil {
+		return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err)
+	}
+
+	usageStat := &UsageStat{
+		Path: path,
+		Fstype: common.IntToString(statvfs.Basetype[:]),
+		Total: statvfs.Blocks * statvfs.Frsize, // Frsize is the fundamental block size used for the Blocks counters
+		Free: statvfs.Bfree * statvfs.Frsize,
+		Used: (statvfs.Blocks - statvfs.Bfree) * statvfs.Frsize,
+
+		// NOTE: ZFS (and FreeBZSD's UFS2) use dynamic inode/dnode allocation.
+		// Explicitly return a near-zero value for InodesUsedPercent so that nothing
+		// attempts to garbage collect based on a lack of available inodes/dnodes.
+		// Similarly, don't use the zero value to prevent divide-by-zero situations
+		// and inject a faux near-zero value. Filesystems evolve. Has your
+		// filesystem evolved? Probably not if you care about the number of
+		// available inodes.
+		InodesTotal: 1024.0 * 1024.0,
+		InodesUsed: 1024.0,
+		InodesFree: math.MaxUint64,
+		InodesUsedPercent: (1024.0 / (1024.0 * 1024.0)) * 100.0,
+	}
+
+	usageStat.UsedPercent = (float64(usageStat.Used) / float64(usageStat.Total)) * 100.0 // NOTE(review): divides by zero if Total == 0 — confirm statvfs can't report zero blocks
+
+	return usageStat, nil
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) { // extracts a disk serial by parsing cfgadm(1M) output; "" with nil error means not found
+	out, err := invoke.CommandWithContext(ctx, "cfgadm", "-ls", "select=type(disk),cols=ap_id:info,cols2=,noheadings")
+	if err != nil {
+		return "", fmt.Errorf("exec cfgadm: %w", err)
+	}
+
+	suf := "::" + strings.TrimPrefix(name, "/dev/") // ap_id lines end with "::<device>"
+	s := bufio.NewScanner(bytes.NewReader(out))
+	for s.Scan() {
+		flds := strings.Fields(s.Text())
+		if strings.HasSuffix(flds[0], suf) {
+			flen := len(flds)
+			if flen >= 3 {
+				for i, f := range flds {
+					if i > 0 && i < flen-1 && f == "SN:" { // serial is the token following "SN:"
+						return flds[i+1], nil
+					}
+				}
+			}
+			return "", nil // matched the device but no serial listed
+		}
+	}
+	if err := s.Err(); err != nil {
+		return "", err
+	}
+	return "", nil
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) { // not implemented on Solaris
+	return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go
new file mode 100644
index 000000000..bdb62b24d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go
@@ -0,0 +1,62 @@
+//go:build freebsd || linux || darwin
+// +build freebsd linux darwin
+
+package disk
+
+import (
+ "context"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { // shared freebsd/linux/darwin usage via statfs(2)
+	stat := unix.Statfs_t{}
+	err := unix.Statfs(path, &stat)
+	if err != nil {
+		return nil, err
+	}
+	bsize := stat.Bsize
+
+	ret := &UsageStat{
+		Path: unescapeFstab(path),
+		Fstype: getFsType(stat),
+		Total: (uint64(stat.Blocks) * uint64(bsize)),
+		Free: (uint64(stat.Bavail) * uint64(bsize)),
+		InodesTotal: (uint64(stat.Files)),
+		InodesFree: (uint64(stat.Ffree)),
+	}
+
+	// if could not get InodesTotal, return empty
+	if ret.InodesTotal < ret.InodesFree { // inconsistent counters: bail out with the partially-filled stat
+		return ret, nil
+	}
+
+	ret.InodesUsed = (ret.InodesTotal - ret.InodesFree)
+	ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize)
+
+	if ret.InodesTotal == 0 { // guard divide-by-zero
+		ret.InodesUsedPercent = 0
+	} else {
+		ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0
+	}
+
+	if (ret.Used + ret.Free) == 0 { // guard divide-by-zero
+		ret.UsedPercent = 0
+	} else {
+		// We don't use ret.Total to calculate percent.
+		// see https://github.com/shirou/gopsutil/issues/562
+		ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0
+	}
+
+	return ret, nil
+}
+
+// Unescape escaped octal chars (like space 040, ampersand 046 and backslash 134) to their real value in fstab fields issue#555
+func unescapeFstab(path string) string { // decodes octal escapes (\040 etc.) by quoting the string and letting strconv.Unquote interpret them
+	escaped, err := strconv.Unquote(`"` + path + `"`)
+	if err != nil { // on any unquoting error, fall back to the raw path
+		return path
+	}
+	return escaped
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go
new file mode 100644
index 000000000..5fb9b5b42
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go
@@ -0,0 +1,201 @@
+//go:build windows
+// +build windows
+
+package disk
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/windows"
+ "golang.org/x/sys/windows/registry"
+)
+
+var (
+ procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW")
+ procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW")
+ procGetDriveType = common.Modkernel32.NewProc("GetDriveTypeW")
+ procGetVolumeInformation = common.Modkernel32.NewProc("GetVolumeInformationW")
+)
+
+var (
+ fileFileCompression = int64(16) // 0x00000010
+ fileReadOnlyVolume = int64(524288) // 0x00080000
+)
+
+// diskPerformance is an equivalent representation of DISK_PERFORMANCE in the Windows API.
+// https://docs.microsoft.com/fr-fr/windows/win32/api/winioctl/ns-winioctl-disk_performance
+type diskPerformance struct {
+ BytesRead int64
+ BytesWritten int64
+ ReadTime int64
+ WriteTime int64
+ IdleTime int64
+ ReadCount uint32
+ WriteCount uint32
+ QueueDepth uint32
+ SplitCount uint32
+ QueryTime int64
+ StorageDeviceNumber uint32
+ StorageManagerName [8]uint16
+ alignmentPadding uint32 // necessary for 32bit support, see https://github.com/elastic/beats/pull/16553
+}
+
+func init() {
+ // enable disk performance counters on Windows Server editions (needs to run as admin)
+ key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Services\PartMgr`, registry.SET_VALUE)
+ if err == nil {
+ key.SetDWordValue("EnableCounterForIoctl", 1)
+ }
+}
+
+func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) {
+ lpFreeBytesAvailable := int64(0)
+ lpTotalNumberOfBytes := int64(0)
+ lpTotalNumberOfFreeBytes := int64(0)
+ diskret, _, err := procGetDiskFreeSpaceExW.Call(
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(path))),
+ uintptr(unsafe.Pointer(&lpFreeBytesAvailable)),
+ uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)),
+ uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes)))
+ if diskret == 0 {
+ return nil, err
+ }
+ ret := &UsageStat{
+ Path: path,
+ Total: uint64(lpTotalNumberOfBytes),
+ Free: uint64(lpTotalNumberOfFreeBytes),
+ Used: uint64(lpTotalNumberOfBytes) - uint64(lpTotalNumberOfFreeBytes),
+ UsedPercent: (float64(lpTotalNumberOfBytes) - float64(lpTotalNumberOfFreeBytes)) / float64(lpTotalNumberOfBytes) * 100,
+ // InodesTotal: 0,
+ // InodesFree: 0,
+ // InodesUsed: 0,
+ // InodesUsedPercent: 0,
+ }
+ return ret, nil
+}
+
+func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
+ var ret []PartitionStat
+ lpBuffer := make([]byte, 254)
+ diskret, _, err := procGetLogicalDriveStringsW.Call(
+ uintptr(len(lpBuffer)),
+ uintptr(unsafe.Pointer(&lpBuffer[0])))
+ if diskret == 0 {
+ return ret, err
+ }
+ for _, v := range lpBuffer {
+ if v >= 65 && v <= 90 {
+ path := string(v) + ":"
+ typepath, _ := windows.UTF16PtrFromString(path)
+ typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath)))
+ if typeret == 0 {
+ return ret, windows.GetLastError()
+ }
+ // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM
+
+ if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 {
+ lpVolumeNameBuffer := make([]byte, 256)
+ lpVolumeSerialNumber := int64(0)
+ lpMaximumComponentLength := int64(0)
+ lpFileSystemFlags := int64(0)
+ lpFileSystemNameBuffer := make([]byte, 256)
+ volpath, _ := windows.UTF16PtrFromString(string(v) + ":/")
+ driveret, _, err := procGetVolumeInformation.Call(
+ uintptr(unsafe.Pointer(volpath)),
+ uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])),
+ uintptr(len(lpVolumeNameBuffer)),
+ uintptr(unsafe.Pointer(&lpVolumeSerialNumber)),
+ uintptr(unsafe.Pointer(&lpMaximumComponentLength)),
+ uintptr(unsafe.Pointer(&lpFileSystemFlags)),
+ uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])),
+ uintptr(len(lpFileSystemNameBuffer)))
+ if driveret == 0 {
+ if typeret == 5 || typeret == 2 {
+ continue // device is not ready will happen if there is no disk in the drive
+ }
+ return ret, err
+ }
+ opts := []string{"rw"}
+ if lpFileSystemFlags&fileReadOnlyVolume != 0 {
+ opts = []string{"ro"}
+ }
+ if lpFileSystemFlags&fileFileCompression != 0 {
+ opts = append(opts, "compress")
+ }
+
+ d := PartitionStat{
+ Mountpoint: path,
+ Device: path,
+ Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)),
+ Opts: opts,
+ }
+ ret = append(ret, d)
+ }
+ }
+ }
+ return ret, nil
+}
+
+func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
+ // https://github.com/giampaolo/psutil/blob/544e9daa4f66a9f80d7bf6c7886d693ee42f0a13/psutil/arch/windows/disk.c#L83
+ drivemap := make(map[string]IOCountersStat, 0)
+ var diskPerformance diskPerformance
+
+ lpBuffer := make([]uint16, 254)
+ lpBufferLen, err := windows.GetLogicalDriveStrings(uint32(len(lpBuffer)), &lpBuffer[0])
+ if err != nil {
+ return drivemap, err
+ }
+ for _, v := range lpBuffer[:lpBufferLen] {
+ if 'A' <= v && v <= 'Z' {
+ path := string(rune(v)) + ":"
+ typepath, _ := windows.UTF16PtrFromString(path)
+ typeret := windows.GetDriveType(typepath)
+ if typeret == 0 {
+ return drivemap, windows.GetLastError()
+ }
+ if typeret != windows.DRIVE_FIXED {
+ continue
+ }
+ szDevice := fmt.Sprintf(`\\.\%s`, path)
+ const IOCTL_DISK_PERFORMANCE = 0x70020
+ h, err := windows.CreateFile(syscall.StringToUTF16Ptr(szDevice), 0, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE, nil, windows.OPEN_EXISTING, 0, 0)
+ if err != nil {
+ if err == windows.ERROR_FILE_NOT_FOUND {
+ continue
+ }
+ return drivemap, err
+ }
+ defer windows.CloseHandle(h)
+
+ var diskPerformanceSize uint32
+ err = windows.DeviceIoControl(h, IOCTL_DISK_PERFORMANCE, nil, 0, (*byte)(unsafe.Pointer(&diskPerformance)), uint32(unsafe.Sizeof(diskPerformance)), &diskPerformanceSize, nil)
+ if err != nil {
+ return drivemap, err
+ }
+ drivemap[path] = IOCountersStat{
+ ReadBytes: uint64(diskPerformance.BytesRead),
+ WriteBytes: uint64(diskPerformance.BytesWritten),
+ ReadCount: uint64(diskPerformance.ReadCount),
+ WriteCount: uint64(diskPerformance.WriteCount),
+ ReadTime: uint64(diskPerformance.ReadTime / 10000 / 1000), // convert to ms: https://github.com/giampaolo/psutil/issues/1012
+ WriteTime: uint64(diskPerformance.WriteTime / 10000 / 1000),
+ Name: path,
+ }
+ }
+ }
+ return drivemap, nil
+}
+
+func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func LabelWithContext(ctx context.Context, name string) (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c
new file mode 100644
index 000000000..8aab04f60
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c
@@ -0,0 +1,129 @@
+// https://github.com/lufia/iostat/blob/9f7362b77ad333b26c01c99de52a11bdb650ded2/iostat_darwin.c
+#include
+#include
+#include "iostat_darwin.h"
+
+#define IOKIT 1 /* to get io_name_t in device_types.h */
+
+#include
+#include
+#include
+#include
+
+#include
+
+static int getdrivestat(io_registry_entry_t d, DriveStats *stat);
+static int fillstat(io_registry_entry_t d, DriveStats *stat);
+
+int
+gopsutil_v3_readdrivestat(DriveStats a[], int n)
+{
+ CFMutableDictionaryRef match;
+ io_iterator_t drives;
+ io_registry_entry_t d;
+ kern_return_t status;
+ int na, rv;
+
+ match = IOServiceMatching("IOMedia");
+ CFDictionaryAddValue(match, CFSTR(kIOMediaWholeKey), kCFBooleanTrue);
+ status = IOServiceGetMatchingServices(0, match, &drives);
+ if(status != KERN_SUCCESS)
+ return -1;
+
+ na = 0;
+ while(na < n && (d=IOIteratorNext(drives)) > 0){
+ rv = getdrivestat(d, &a[na]);
+ if(rv < 0)
+ return -1;
+ if(rv > 0)
+ na++;
+ IOObjectRelease(d);
+ }
+ IOObjectRelease(drives);
+ return na;
+}
+
+static int
+getdrivestat(io_registry_entry_t d, DriveStats *stat)
+{
+ io_registry_entry_t parent;
+ kern_return_t status;
+ CFDictionaryRef props;
+ CFStringRef name;
+ CFNumberRef num;
+ int rv;
+
+ memset(stat, 0, sizeof *stat);
+ status = IORegistryEntryGetParentEntry(d, kIOServicePlane, &parent);
+ if(status != KERN_SUCCESS)
+ return -1;
+ if(!IOObjectConformsTo(parent, "IOBlockStorageDriver")){
+ IOObjectRelease(parent);
+ return 0;
+ }
+
+ status = IORegistryEntryCreateCFProperties(d, (CFMutableDictionaryRef *)&props, kCFAllocatorDefault, kNilOptions);
+ if(status != KERN_SUCCESS){
+ IOObjectRelease(parent);
+ return -1;
+ }
+ name = (CFStringRef)CFDictionaryGetValue(props, CFSTR(kIOBSDNameKey));
+ CFStringGetCString(name, stat->name, NAMELEN, CFStringGetSystemEncoding());
+ num = (CFNumberRef)CFDictionaryGetValue(props, CFSTR(kIOMediaSizeKey));
+ CFNumberGetValue(num, kCFNumberSInt64Type, &stat->size);
+ num = (CFNumberRef)CFDictionaryGetValue(props, CFSTR(kIOMediaPreferredBlockSizeKey));
+ CFNumberGetValue(num, kCFNumberSInt64Type, &stat->blocksize);
+ CFRelease(props);
+
+ rv = fillstat(parent, stat);
+ IOObjectRelease(parent);
+ if(rv < 0)
+ return -1;
+ return 1;
+}
+
+static struct {
+ char *key;
+ size_t off;
+} statstab[] = {
+ {kIOBlockStorageDriverStatisticsBytesReadKey, offsetof(DriveStats, read)},
+ {kIOBlockStorageDriverStatisticsBytesWrittenKey, offsetof(DriveStats, written)},
+ {kIOBlockStorageDriverStatisticsReadsKey, offsetof(DriveStats, nread)},
+ {kIOBlockStorageDriverStatisticsWritesKey, offsetof(DriveStats, nwrite)},
+ {kIOBlockStorageDriverStatisticsTotalReadTimeKey, offsetof(DriveStats, readtime)},
+ {kIOBlockStorageDriverStatisticsTotalWriteTimeKey, offsetof(DriveStats, writetime)},
+ {kIOBlockStorageDriverStatisticsLatentReadTimeKey, offsetof(DriveStats, readlat)},
+ {kIOBlockStorageDriverStatisticsLatentWriteTimeKey, offsetof(DriveStats, writelat)},
+};
+
+static int
+fillstat(io_registry_entry_t d, DriveStats *stat)
+{
+ CFDictionaryRef props, v;
+ CFNumberRef num;
+ kern_return_t status;
+ typeof(statstab[0]) *bp, *ep;
+
+ status = IORegistryEntryCreateCFProperties(d, (CFMutableDictionaryRef *)&props, kCFAllocatorDefault, kNilOptions);
+ if(status != KERN_SUCCESS)
+ return -1;
+ v = (CFDictionaryRef)CFDictionaryGetValue(props, CFSTR(kIOBlockStorageDriverStatisticsKey));
+ if(v == NULL){
+ CFRelease(props);
+ return -1;
+ }
+
+ ep = &statstab[sizeof(statstab)/sizeof(statstab[0])];
+ for(bp = &statstab[0]; bp < ep; bp++){
+ CFStringRef s;
+
+ s = CFStringCreateWithCString(kCFAllocatorDefault, bp->key, CFStringGetSystemEncoding());
+ num = (CFNumberRef)CFDictionaryGetValue(v, s);
+ if(num)
+ CFNumberGetValue(num, kCFNumberSInt64Type, ((char*)stat)+bp->off);
+ CFRelease(s);
+ }
+
+ CFRelease(props);
+ return 0;
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h
new file mode 100644
index 000000000..cb9ec7a0a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h
@@ -0,0 +1,32 @@
+// https://github.com/lufia/iostat/blob/9f7362b77ad333b26c01c99de52a11bdb650ded2/iostat_darwin.h
+typedef struct DriveStats DriveStats;
+typedef struct CPUStats CPUStats;
+
+enum {
+ NDRIVE = 16,
+ NAMELEN = 31
+};
+
+struct DriveStats {
+ char name[NAMELEN+1];
+ int64_t size;
+ int64_t blocksize;
+
+ int64_t read;
+ int64_t written;
+ int64_t nread;
+ int64_t nwrite;
+ int64_t readtime;
+ int64_t writetime;
+ int64_t readlat;
+ int64_t writelat;
+};
+
+struct CPUStats {
+ natural_t user;
+ natural_t nice;
+ natural_t sys;
+ natural_t idle;
+};
+
+extern int gopsutil_v3_readdrivestat(DriveStats a[], int n);
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host.go
new file mode 100644
index 000000000..7c53e2084
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host.go
@@ -0,0 +1,157 @@
+package host
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var invoke common.Invoker = common.Invoke{}
+
+// A HostInfoStat describes the host status.
+// This is not in the psutil but it useful.
+type InfoStat struct {
+ Hostname string `json:"hostname"`
+ Uptime uint64 `json:"uptime"`
+ BootTime uint64 `json:"bootTime"`
+ Procs uint64 `json:"procs"` // number of processes
+ OS string `json:"os"` // ex: freebsd, linux
+ Platform string `json:"platform"` // ex: ubuntu, linuxmint
+ PlatformFamily string `json:"platformFamily"` // ex: debian, rhel
+ PlatformVersion string `json:"platformVersion"` // version of the complete OS
+ KernelVersion string `json:"kernelVersion"` // version of the OS kernel (if available)
+ KernelArch string `json:"kernelArch"` // native cpu architecture queried at runtime, as returned by `uname -m` or empty string in case of error
+ VirtualizationSystem string `json:"virtualizationSystem"`
+ VirtualizationRole string `json:"virtualizationRole"` // guest or host
+ HostID string `json:"hostId"` // ex: uuid
+}
+
+type UserStat struct {
+ User string `json:"user"`
+ Terminal string `json:"terminal"`
+ Host string `json:"host"`
+ Started int `json:"started"`
+}
+
+type TemperatureStat struct {
+ SensorKey string `json:"sensorKey"`
+ Temperature float64 `json:"temperature"`
+ High float64 `json:"sensorHigh"`
+ Critical float64 `json:"sensorCritical"`
+}
+
+func (h InfoStat) String() string {
+ s, _ := json.Marshal(h)
+ return string(s)
+}
+
+func (u UserStat) String() string {
+ s, _ := json.Marshal(u)
+ return string(s)
+}
+
+func (t TemperatureStat) String() string {
+ s, _ := json.Marshal(t)
+ return string(s)
+}
+
+func Info() (*InfoStat, error) {
+ return InfoWithContext(context.Background())
+}
+
+func InfoWithContext(ctx context.Context) (*InfoStat, error) {
+ var err error
+ ret := &InfoStat{
+ OS: runtime.GOOS,
+ }
+
+ ret.Hostname, err = os.Hostname()
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.Platform, ret.PlatformFamily, ret.PlatformVersion, err = PlatformInformationWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.KernelVersion, err = KernelVersionWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.KernelArch, err = KernelArch()
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.VirtualizationSystem, ret.VirtualizationRole, err = VirtualizationWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.BootTime, err = BootTimeWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.Uptime, err = UptimeWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.Procs, err = numProcs(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ ret.HostID, err = HostIDWithContext(ctx)
+ if err != nil && !errors.Is(err, common.ErrNotImplementedError) {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// BootTime returns the system boot time expressed in seconds since the epoch.
+func BootTime() (uint64, error) {
+ return BootTimeWithContext(context.Background())
+}
+
+func Uptime() (uint64, error) {
+ return UptimeWithContext(context.Background())
+}
+
+func Users() ([]UserStat, error) {
+ return UsersWithContext(context.Background())
+}
+
+func PlatformInformation() (string, string, string, error) {
+ return PlatformInformationWithContext(context.Background())
+}
+
+// HostID returns the unique host ID provided by the OS.
+func HostID() (string, error) {
+ return HostIDWithContext(context.Background())
+}
+
+func Virtualization() (string, string, error) {
+ return VirtualizationWithContext(context.Background())
+}
+
+func KernelVersion() (string, error) {
+ return KernelVersionWithContext(context.Background())
+}
+
+func SensorsTemperatures() ([]TemperatureStat, error) {
+ return SensorsTemperaturesWithContext(context.Background())
+}
+
+func timeSince(ts uint64) uint64 {
+ return uint64(time.Now().Unix()) - ts
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go
new file mode 100644
index 000000000..4dc2bba58
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go
@@ -0,0 +1,37 @@
+//go:build darwin || freebsd || openbsd
+// +build darwin freebsd openbsd
+
+package host
+
+import (
+ "context"
+ "sync/atomic"
+
+ "golang.org/x/sys/unix"
+)
+
+// cachedBootTime must be accessed via atomic.Load/StoreUint64
+var cachedBootTime uint64
+
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+ t := atomic.LoadUint64(&cachedBootTime)
+ if t != 0 {
+ return t, nil
+ }
+ tv, err := unix.SysctlTimeval("kern.boottime")
+ if err != nil {
+ return 0, err
+ }
+
+ atomic.StoreUint64(&cachedBootTime, uint64(tv.Sec))
+
+ return uint64(tv.Sec), nil
+}
+
+func UptimeWithContext(ctx context.Context) (uint64, error) {
+ boot, err := BootTimeWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return timeSince(boot), nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go
new file mode 100644
index 000000000..2f20fc616
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go
@@ -0,0 +1,129 @@
+//go:build darwin
+// +build darwin
+
+package host
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "io/ioutil"
+ "os"
+ "strings"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/process"
+ "golang.org/x/sys/unix"
+)
+
+// from utmpx.h
+const user_PROCESS = 7
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ out, err := invoke.CommandWithContext(ctx, "ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ if err != nil {
+ return "", err
+ }
+
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.SplitAfter(line, `" = "`)
+ if len(parts) == 2 {
+ uuid := strings.TrimRight(parts[1], `"`)
+ return strings.ToLower(uuid), nil
+ }
+ }
+ }
+
+ return "", errors.New("cannot find host id")
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ procs, err := process.PidsWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(procs)), nil
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ utmpfile := "/var/run/utmpx"
+ var ret []UserStat
+
+ file, err := os.Open(utmpfile)
+ if err != nil {
+ return ret, err
+ }
+ defer file.Close()
+
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ return ret, err
+ }
+
+ u := Utmpx{}
+ entrySize := int(unsafe.Sizeof(u))
+ count := len(buf) / entrySize
+
+ for i := 0; i < count; i++ {
+ b := buf[i*entrySize : i*entrySize+entrySize]
+
+ var u Utmpx
+ br := bytes.NewReader(b)
+ err := binary.Read(br, binary.LittleEndian, &u)
+ if err != nil {
+ continue
+ }
+ if u.Type != user_PROCESS {
+ continue
+ }
+ user := UserStat{
+ User: common.IntToString(u.User[:]),
+ Terminal: common.IntToString(u.Line[:]),
+ Host: common.IntToString(u.Host[:]),
+ Started: int(u.Tv.Sec),
+ }
+ ret = append(ret, user)
+ }
+
+ return ret, nil
+}
+
+func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) {
+ platform := ""
+ family := ""
+ pver := ""
+
+ p, err := unix.Sysctl("kern.ostype")
+ if err == nil {
+ platform = strings.ToLower(p)
+ }
+
+ out, err := invoke.CommandWithContext(ctx, "sw_vers", "-productVersion")
+ if err == nil {
+ pver = strings.ToLower(strings.TrimSpace(string(out)))
+ }
+
+ // check if the macos server version file exists
+ _, err = os.Stat("/System/Library/CoreServices/ServerVersion.plist")
+
+ // server file doesn't exist
+ if os.IsNotExist(err) {
+ family = "Standalone Workstation"
+ } else {
+ family = "Server"
+ }
+
+ return platform, family, pver, nil
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ version, err := unix.Sysctl("kern.osrelease")
+ return strings.ToLower(version), err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go
new file mode 100644
index 000000000..8caeed2e8
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go
@@ -0,0 +1,20 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_darwin.go
+
+package host
+
+type Utmpx struct {
+ User [256]int8
+ ID [4]int8
+ Line [32]int8
+ Pid int32
+ Type int16
+ Pad_cgo_0 [6]byte
+ Tv Timeval
+ Host [256]int8
+ Pad [16]uint32
+}
+
+type Timeval struct {
+ Sec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go
new file mode 100644
index 000000000..293bd4df8
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go
@@ -0,0 +1,23 @@
+//go:build darwin && arm64
+// +build darwin,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs host/types_darwin.go
+
+package host
+
+type Utmpx struct {
+ User [256]int8
+ Id [4]int8
+ Line [32]int8
+ Pid int32
+ Type int16
+ Tv Timeval
+ Host [256]int8
+ Pad [16]uint32
+}
+type Timeval struct {
+ Sec int64
+ Usec int32
+ Pad_cgo_0 [4]byte
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go
new file mode 100644
index 000000000..ffdc7b78f
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go
@@ -0,0 +1,47 @@
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package host
+
+// #cgo LDFLAGS: -framework IOKit
+// #include "smc_darwin.h"
+import "C"
+import "context"
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ temperatureKeys := []string{
+ C.AMBIENT_AIR_0,
+ C.AMBIENT_AIR_1,
+ C.CPU_0_DIODE,
+ C.CPU_0_HEATSINK,
+ C.CPU_0_PROXIMITY,
+ C.ENCLOSURE_BASE_0,
+ C.ENCLOSURE_BASE_1,
+ C.ENCLOSURE_BASE_2,
+ C.ENCLOSURE_BASE_3,
+ C.GPU_0_DIODE,
+ C.GPU_0_HEATSINK,
+ C.GPU_0_PROXIMITY,
+ C.HARD_DRIVE_BAY,
+ C.MEMORY_SLOT_0,
+ C.MEMORY_SLOTS_PROXIMITY,
+ C.NORTHBRIDGE,
+ C.NORTHBRIDGE_DIODE,
+ C.NORTHBRIDGE_PROXIMITY,
+ C.THUNDERBOLT_0,
+ C.THUNDERBOLT_1,
+ C.WIRELESS_MODULE,
+ }
+ var temperatures []TemperatureStat
+
+ C.gopsutil_v3_open_smc()
+ defer C.gopsutil_v3_close_smc()
+
+ for _, key := range temperatureKeys {
+ temperatures = append(temperatures, TemperatureStat{
+ SensorKey: key,
+ Temperature: float64(C.gopsutil_v3_get_temperature(C.CString(key))),
+ })
+ }
+ return temperatures, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go
new file mode 100644
index 000000000..6285ba94d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go
@@ -0,0 +1,14 @@
+//go:build darwin && !cgo
+// +build darwin,!cgo
+
+package host
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ return []TemperatureStat{}, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go
new file mode 100644
index 000000000..585250f9a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go
@@ -0,0 +1,50 @@
+//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows
+// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows
+
+package host
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func UptimeWithContext(ctx context.Context) (uint64, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ return []UserStat{}, common.ErrNotImplementedError
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) {
+ return "", "", "", common.ErrNotImplementedError
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ return []TemperatureStat{}, common.ErrNotImplementedError
+}
+
+func KernelArch() (string, error) {
+ return "", common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go
new file mode 100644
index 000000000..2c9aa9d0d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go
@@ -0,0 +1,151 @@
+//go:build freebsd
+// +build freebsd
+
+package host
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "io/ioutil"
+ "math"
+ "os"
+ "strings"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/process"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ UTNameSize = 16 /* see MAXLOGNAME in */
+ UTLineSize = 8
+ UTHostSize = 16
+)
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ uuid, err := unix.Sysctl("kern.hostuuid")
+ if err != nil {
+ return "", err
+ }
+ return strings.ToLower(uuid), err
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ procs, err := process.PidsWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(procs)), nil
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ utmpfile := "/var/run/utx.active"
+ if !common.PathExists(utmpfile) {
+ utmpfile = "/var/run/utmp" // before 9.0
+ return getUsersFromUtmp(utmpfile)
+ }
+
+ var ret []UserStat
+ file, err := os.Open(utmpfile)
+ if err != nil {
+ return ret, err
+ }
+ defer file.Close()
+
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ return ret, err
+ }
+
+ entrySize := sizeOfUtmpx
+ count := len(buf) / entrySize
+
+ for i := 0; i < count; i++ {
+ b := buf[i*sizeOfUtmpx : (i+1)*sizeOfUtmpx]
+ var u Utmpx
+ br := bytes.NewReader(b)
+ err := binary.Read(br, binary.BigEndian, &u)
+ if err != nil || u.Type != 4 {
+ continue
+ }
+ sec := math.Floor(float64(u.Tv) / 1000000)
+ user := UserStat{
+ User: common.IntToString(u.User[:]),
+ Terminal: common.IntToString(u.Line[:]),
+ Host: common.IntToString(u.Host[:]),
+ Started: int(sec),
+ }
+
+ ret = append(ret, user)
+ }
+
+ return ret, nil
+}
+
+func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) {
+ platform, err := unix.Sysctl("kern.ostype")
+ if err != nil {
+ return "", "", "", err
+ }
+
+ version, err := unix.Sysctl("kern.osrelease")
+ if err != nil {
+ return "", "", "", err
+ }
+
+ return strings.ToLower(platform), "", strings.ToLower(version), nil
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+// before 9.0
+func getUsersFromUtmp(utmpfile string) ([]UserStat, error) {
+ var ret []UserStat
+ file, err := os.Open(utmpfile)
+ if err != nil {
+ return ret, err
+ }
+ defer file.Close()
+
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ return ret, err
+ }
+
+ u := Utmp{}
+ entrySize := int(unsafe.Sizeof(u))
+ count := len(buf) / entrySize
+
+ for i := 0; i < count; i++ {
+ b := buf[i*entrySize : i*entrySize+entrySize]
+ var u Utmp
+ br := bytes.NewReader(b)
+ err := binary.Read(br, binary.LittleEndian, &u)
+ if err != nil || u.Time == 0 {
+ continue
+ }
+ user := UserStat{
+ User: common.IntToString(u.Name[:]),
+ Terminal: common.IntToString(u.Line[:]),
+ Host: common.IntToString(u.Host[:]),
+ Started: int(u.Time),
+ }
+
+ ret = append(ret, user)
+ }
+
+ return ret, nil
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ return []TemperatureStat{}, common.ErrNotImplementedError
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ _, _, version, err := PlatformInformationWithContext(ctx)
+ return version, err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go
new file mode 100644
index 000000000..88453d2a2
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go
@@ -0,0 +1,37 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_freebsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmpx = 0xc5
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [16]int8
+ Host [16]int8
+ Time int32
+}
+
+type Utmpx struct {
+ Type uint8
+ Tv uint64
+ Id [8]int8
+ Pid uint32
+ User [32]int8
+ Line [16]int8
+ Host [128]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go
new file mode 100644
index 000000000..8af74b0fe
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go
@@ -0,0 +1,37 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_freebsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmpx = 0xc5
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [16]int8
+ Host [16]int8
+ Time int32
+}
+
+type Utmpx struct {
+ Type uint8
+ Tv uint64
+ Id [8]int8
+ Pid uint32
+ User [32]int8
+ Line [16]int8
+ Host [128]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go
new file mode 100644
index 000000000..f7d6ede55
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go
@@ -0,0 +1,37 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_freebsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmpx = 0xc5
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [16]int8
+ Host [16]int8
+ Time int32
+}
+
+type Utmpx struct {
+ Type uint8
+ Tv uint64
+ Id [8]int8
+ Pid uint32
+ User [32]int8
+ Line [16]int8
+ Host [128]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go
new file mode 100644
index 000000000..41bec3c11
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go
@@ -0,0 +1,40 @@
+//go:build freebsd && arm64
+// +build freebsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs host/types_freebsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmpx = 0xc5
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [16]int8
+ Host [16]int8
+ Time int32
+}
+
+type Utmpx struct {
+ Type uint8
+ Tv uint64
+ Id [8]int8
+ Pid uint32
+ User [32]int8
+ Line [16]int8
+ Host [128]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go
new file mode 100644
index 000000000..940415c9c
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go
@@ -0,0 +1,518 @@
+//go:build linux
+// +build linux
+
+package host
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+type lsbStruct struct {
+ ID string
+ Release string
+ Codename string
+ Description string
+}
+
+// from utmp.h
+const (
+ user_PROCESS = 7
+
+ hostTemperatureScale = 1000.0
+)
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ sysProductUUID := common.HostSys("class/dmi/id/product_uuid")
+ machineID := common.HostEtc("machine-id")
+ procSysKernelRandomBootID := common.HostProc("sys/kernel/random/boot_id")
+ switch {
+ // In order to read this file, needs to be supported by kernel/arch and run as root
+ // so having fallback is important
+ case common.PathExists(sysProductUUID):
+ lines, err := common.ReadLines(sysProductUUID)
+ if err == nil && len(lines) > 0 && lines[0] != "" {
+ return strings.ToLower(lines[0]), nil
+ }
+ fallthrough
+ // Fallback on GNU Linux systems with systemd, readable by everyone
+ case common.PathExists(machineID):
+ lines, err := common.ReadLines(machineID)
+ if err == nil && len(lines) > 0 && len(lines[0]) == 32 {
+ st := lines[0]
+ return fmt.Sprintf("%s-%s-%s-%s-%s", st[0:8], st[8:12], st[12:16], st[16:20], st[20:32]), nil
+ }
+ fallthrough
+ // Not stable between reboot, but better than nothing
+ default:
+ lines, err := common.ReadLines(procSysKernelRandomBootID)
+ if err == nil && len(lines) > 0 && lines[0] != "" {
+ return strings.ToLower(lines[0]), nil
+ }
+ }
+
+ return "", nil
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ return common.NumProcs()
+}
+
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+ return common.BootTimeWithContext(ctx)
+}
+
+func UptimeWithContext(ctx context.Context) (uint64, error) {
+ sysinfo := &unix.Sysinfo_t{}
+ if err := unix.Sysinfo(sysinfo); err != nil {
+ return 0, err
+ }
+ return uint64(sysinfo.Uptime), nil
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ utmpfile := common.HostVar("run/utmp")
+
+ file, err := os.Open(utmpfile)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+
+ count := len(buf) / sizeOfUtmp
+
+ ret := make([]UserStat, 0, count)
+
+ for i := 0; i < count; i++ {
+ b := buf[i*sizeOfUtmp : (i+1)*sizeOfUtmp]
+
+ var u utmp
+ br := bytes.NewReader(b)
+ err := binary.Read(br, binary.LittleEndian, &u)
+ if err != nil {
+ continue
+ }
+ if u.Type != user_PROCESS {
+ continue
+ }
+ user := UserStat{
+ User: common.IntToString(u.User[:]),
+ Terminal: common.IntToString(u.Line[:]),
+ Host: common.IntToString(u.Host[:]),
+ Started: int(u.Tv.Sec),
+ }
+ ret = append(ret, user)
+ }
+
+ return ret, nil
+}
+
+func getlsbStruct() (*lsbStruct, error) {
+ ret := &lsbStruct{}
+ if common.PathExists(common.HostEtc("lsb-release")) {
+ contents, err := common.ReadLines(common.HostEtc("lsb-release"))
+ if err != nil {
+ return ret, err // return empty
+ }
+ for _, line := range contents {
+ field := strings.Split(line, "=")
+ if len(field) < 2 {
+ continue
+ }
+ switch field[0] {
+ case "DISTRIB_ID":
+ ret.ID = field[1]
+ case "DISTRIB_RELEASE":
+ ret.Release = field[1]
+ case "DISTRIB_CODENAME":
+ ret.Codename = field[1]
+ case "DISTRIB_DESCRIPTION":
+ ret.Description = field[1]
+ }
+ }
+ } else if common.PathExists("/usr/bin/lsb_release") {
+ out, err := invoke.Command("/usr/bin/lsb_release")
+ if err != nil {
+ return ret, err
+ }
+ for _, line := range strings.Split(string(out), "\n") {
+ field := strings.Split(line, ":")
+ if len(field) < 2 {
+ continue
+ }
+ switch field[0] {
+ case "Distributor ID":
+ ret.ID = field[1]
+ case "Release":
+ ret.Release = field[1]
+ case "Codename":
+ ret.Codename = field[1]
+ case "Description":
+ ret.Description = field[1]
+ }
+ }
+
+ }
+
+ return ret, nil
+}
+
+func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) {
+ lsb, err := getlsbStruct()
+ if err != nil {
+ lsb = &lsbStruct{}
+ }
+
+ if common.PathExistsWithContents(common.HostEtc("oracle-release")) {
+ platform = "oracle"
+ contents, err := common.ReadLines(common.HostEtc("oracle-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ }
+
+ } else if common.PathExistsWithContents(common.HostEtc("enterprise-release")) {
+ platform = "oracle"
+ contents, err := common.ReadLines(common.HostEtc("enterprise-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ }
+ } else if common.PathExistsWithContents(common.HostEtc("slackware-version")) {
+ platform = "slackware"
+ contents, err := common.ReadLines(common.HostEtc("slackware-version"))
+ if err == nil {
+ version = getSlackwareVersion(contents)
+ }
+ } else if common.PathExistsWithContents(common.HostEtc("debian_version")) {
+ if lsb.ID == "Ubuntu" {
+ platform = "ubuntu"
+ version = lsb.Release
+ } else if lsb.ID == "LinuxMint" {
+ platform = "linuxmint"
+ version = lsb.Release
+ } else {
+ if common.PathExistsWithContents("/usr/bin/raspi-config") {
+ platform = "raspbian"
+ } else {
+ platform = "debian"
+ }
+ contents, err := common.ReadLines(common.HostEtc("debian_version"))
+ if err == nil && len(contents) > 0 && contents[0] != "" {
+ version = contents[0]
+ }
+ }
+ } else if common.PathExists(common.HostEtc("neokylin-release")) {
+ contents, err := common.ReadLines(common.HostEtc("neokylin-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ platform = getRedhatishPlatform(contents)
+ }
+ } else if common.PathExists(common.HostEtc("redhat-release")) {
+ contents, err := common.ReadLines(common.HostEtc("redhat-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ platform = getRedhatishPlatform(contents)
+ }
+ } else if common.PathExists(common.HostEtc("system-release")) {
+ contents, err := common.ReadLines(common.HostEtc("system-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ platform = getRedhatishPlatform(contents)
+ }
+ } else if common.PathExists(common.HostEtc("gentoo-release")) {
+ platform = "gentoo"
+ contents, err := common.ReadLines(common.HostEtc("gentoo-release"))
+ if err == nil {
+ version = getRedhatishVersion(contents)
+ }
+ } else if common.PathExists(common.HostEtc("SuSE-release")) {
+ contents, err := common.ReadLines(common.HostEtc("SuSE-release"))
+ if err == nil {
+ version = getSuseVersion(contents)
+ platform = getSusePlatform(contents)
+ }
+	// TODO: slackware detection
+ } else if common.PathExists(common.HostEtc("arch-release")) {
+ platform = "arch"
+ version = lsb.Release
+ } else if common.PathExists(common.HostEtc("alpine-release")) {
+ platform = "alpine"
+ contents, err := common.ReadLines(common.HostEtc("alpine-release"))
+ if err == nil && len(contents) > 0 && contents[0] != "" {
+ version = contents[0]
+ }
+ } else if common.PathExists(common.HostEtc("os-release")) {
+ p, v, err := common.GetOSRelease()
+ if err == nil {
+ platform = p
+ version = v
+ }
+ } else if lsb.ID == "RedHat" {
+ platform = "redhat"
+ version = lsb.Release
+ } else if lsb.ID == "Amazon" {
+ platform = "amazon"
+ version = lsb.Release
+ } else if lsb.ID == "ScientificSL" {
+ platform = "scientific"
+ version = lsb.Release
+ } else if lsb.ID == "XenServer" {
+ platform = "xenserver"
+ version = lsb.Release
+ } else if lsb.ID != "" {
+ platform = strings.ToLower(lsb.ID)
+ version = lsb.Release
+ }
+
+ platform = strings.Trim(platform, `"`)
+
+ switch platform {
+ case "debian", "ubuntu", "linuxmint", "raspbian":
+ family = "debian"
+ case "fedora":
+ family = "fedora"
+ case "oracle", "centos", "redhat", "scientific", "enterpriseenterprise", "amazon", "xenserver", "cloudlinux", "ibm_powerkvm", "rocky", "almalinux":
+ family = "rhel"
+ case "suse", "opensuse", "opensuse-leap", "opensuse-tumbleweed", "opensuse-tumbleweed-kubic", "sles", "sled", "caasp":
+ family = "suse"
+ case "gentoo":
+ family = "gentoo"
+ case "slackware":
+ family = "slackware"
+ case "arch":
+ family = "arch"
+ case "exherbo":
+ family = "exherbo"
+ case "alpine":
+ family = "alpine"
+ case "coreos":
+ family = "coreos"
+ case "solus":
+ family = "solus"
+ case "neokylin":
+ family = "neokylin"
+ }
+
+ return platform, family, version, nil
+}
+
+func KernelVersionWithContext(ctx context.Context) (version string, err error) {
+ var utsname unix.Utsname
+ err = unix.Uname(&utsname)
+ if err != nil {
+ return "", err
+ }
+ return string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), nil
+}
+
+func getSlackwareVersion(contents []string) string {
+ c := strings.ToLower(strings.Join(contents, ""))
+ c = strings.Replace(c, "slackware ", "", 1)
+ return c
+}
+
+func getRedhatishVersion(contents []string) string {
+ c := strings.ToLower(strings.Join(contents, ""))
+
+ if strings.Contains(c, "rawhide") {
+ return "rawhide"
+ }
+ if matches := regexp.MustCompile(`release (\w[\d.]*)`).FindStringSubmatch(c); matches != nil {
+ return matches[1]
+ }
+ return ""
+}
+
+func getRedhatishPlatform(contents []string) string {
+ c := strings.ToLower(strings.Join(contents, ""))
+
+ if strings.Contains(c, "red hat") {
+ return "redhat"
+ }
+ f := strings.Split(c, " ")
+
+ return f[0]
+}
+
+func getSuseVersion(contents []string) string {
+ version := ""
+ for _, line := range contents {
+ if matches := regexp.MustCompile(`VERSION = ([\d.]+)`).FindStringSubmatch(line); matches != nil {
+ version = matches[1]
+ } else if matches := regexp.MustCompile(`PATCHLEVEL = ([\d]+)`).FindStringSubmatch(line); matches != nil {
+ version = version + "." + matches[1]
+ }
+ }
+ return version
+}
+
+func getSusePlatform(contents []string) string {
+ c := strings.ToLower(strings.Join(contents, ""))
+ if strings.Contains(c, "opensuse") {
+ return "opensuse"
+ }
+ return "suse"
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return common.VirtualizationWithContext(ctx)
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ var err error
+
+ var files []string
+
+ temperatures := make([]TemperatureStat, 0)
+
+ // Only the temp*_input file provides current temperature
+	// value in millidegree Celsius as reported by the temperature sensor of the device:
+ // https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
+ if files, err = filepath.Glob(common.HostSys("/class/hwmon/hwmon*/temp*_input")); err != nil {
+ return temperatures, err
+ }
+
+ if len(files) == 0 {
+ // CentOS has an intermediate /device directory:
+ // https://github.com/giampaolo/psutil/issues/971
+ if files, err = filepath.Glob(common.HostSys("/class/hwmon/hwmon*/device/temp*_input")); err != nil {
+ return temperatures, err
+ }
+ }
+
+ var warns Warnings
+
+ if len(files) == 0 { // handle distributions without hwmon, like raspbian #391, parse legacy thermal_zone files
+ files, err = filepath.Glob(common.HostSys("/class/thermal/thermal_zone*/"))
+ if err != nil {
+ return temperatures, err
+ }
+ for _, file := range files {
+ // Get the name of the temperature you are reading
+ name, err := ioutil.ReadFile(filepath.Join(file, "type"))
+ if err != nil {
+ warns.Add(err)
+ continue
+ }
+ // Get the temperature reading
+ current, err := ioutil.ReadFile(filepath.Join(file, "temp"))
+ if err != nil {
+ warns.Add(err)
+ continue
+ }
+ temperature, err := strconv.ParseInt(strings.TrimSpace(string(current)), 10, 64)
+ if err != nil {
+ warns.Add(err)
+ continue
+ }
+
+ temperatures = append(temperatures, TemperatureStat{
+ SensorKey: strings.TrimSpace(string(name)),
+ Temperature: float64(temperature) / 1000.0,
+ })
+ }
+ return temperatures, warns.Reference()
+ }
+
+ temperatures = make([]TemperatureStat, 0, len(files))
+
+ // example directory
+ // device/ temp1_crit_alarm temp2_crit_alarm temp3_crit_alarm temp4_crit_alarm temp5_crit_alarm temp6_crit_alarm temp7_crit_alarm
+ // name temp1_input temp2_input temp3_input temp4_input temp5_input temp6_input temp7_input
+ // power/ temp1_label temp2_label temp3_label temp4_label temp5_label temp6_label temp7_label
+ // subsystem/ temp1_max temp2_max temp3_max temp4_max temp5_max temp6_max temp7_max
+ // temp1_crit temp2_crit temp3_crit temp4_crit temp5_crit temp6_crit temp7_crit uevent
+ for _, file := range files {
+ var raw []byte
+
+ var temperature float64
+
+ // Get the base directory location
+ directory := filepath.Dir(file)
+
+ // Get the base filename prefix like temp1
+ basename := strings.Split(filepath.Base(file), "_")[0]
+
+ // Get the base path like /temp1
+ basepath := filepath.Join(directory, basename)
+
+ // Get the label of the temperature you are reading
+ label := ""
+
+ if raw, _ = ioutil.ReadFile(basepath + "_label"); len(raw) != 0 {
+ // Format the label from "Core 0" to "core_0"
+ label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(raw))), " "), "_")
+ }
+
+ // Get the name of the temperature you are reading
+ if raw, err = ioutil.ReadFile(filepath.Join(directory, "name")); err != nil {
+ warns.Add(err)
+ continue
+ }
+
+ name := strings.TrimSpace(string(raw))
+
+ if label != "" {
+ name = name + "_" + label
+ }
+
+ // Get the temperature reading
+ if raw, err = ioutil.ReadFile(file); err != nil {
+ warns.Add(err)
+ continue
+ }
+
+ if temperature, err = strconv.ParseFloat(strings.TrimSpace(string(raw)), 64); err != nil {
+ warns.Add(err)
+ continue
+ }
+
+ // Add discovered temperature sensor to the list
+ temperatures = append(temperatures, TemperatureStat{
+ SensorKey: name,
+ Temperature: temperature / hostTemperatureScale,
+ High: optionalValueReadFromFile(basepath+"_max") / hostTemperatureScale,
+ Critical: optionalValueReadFromFile(basepath+"_crit") / hostTemperatureScale,
+ })
+ }
+
+ return temperatures, warns.Reference()
+}
+
+func optionalValueReadFromFile(filename string) float64 {
+ var raw []byte
+
+ var err error
+
+ var value float64
+
+ // Check if file exists
+ if _, err := os.Stat(filename); os.IsNotExist(err) {
+ return 0
+ }
+
+ if raw, err = ioutil.ReadFile(filename); err != nil {
+ return 0
+ }
+
+ if value, err = strconv.ParseFloat(strings.TrimSpace(string(raw)), 64); err != nil {
+ return 0
+ }
+
+ return value
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go
new file mode 100644
index 000000000..46e0c5d5a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go
@@ -0,0 +1,47 @@
+// ATTENTION - FILE MANUALLY FIXED AFTER CGO.
+// Fixed line: Tv _Ctype_struct_timeval -> Tv UtTv
+// Created by cgo -godefs, MANUAL FIXED
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ ID [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv UtTv
+ Addr_v6 [4]int32
+ X__unused [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type UtTv struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go
new file mode 100644
index 000000000..1e574482f
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go
@@ -0,0 +1,50 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv _Ctype_struct___0
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type _Ctype_struct___0 struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go
new file mode 100644
index 000000000..7abbbb8a3
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go | sed "s/uint8/int8/g"
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go
new file mode 100644
index 000000000..eebef55cd
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int64
+ Usec int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go
new file mode 100644
index 000000000..50207e5bc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__unused [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go
new file mode 100644
index 000000000..50207e5bc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__unused [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go
new file mode 100644
index 000000000..50207e5bc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__unused [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go
new file mode 100644
index 000000000..50207e5bc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go
@@ -0,0 +1,45 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__unused [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go
new file mode 100644
index 000000000..51f5bee11
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go
@@ -0,0 +1,48 @@
+//go:build linux && ppc64le
+// +build linux,ppc64le
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int64
+ Usec int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go
new file mode 100644
index 000000000..bb03a0b39
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go
@@ -0,0 +1,49 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv _Ctype_struct___0
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]uint8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type _Ctype_struct___0 struct {
+ Sec int32
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go
new file mode 100644
index 000000000..6ea432a61
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go
@@ -0,0 +1,48 @@
+//go:build linux && s390x
+// +build linux,s390x
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x180
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type utmp struct {
+ Type int16
+ Pad_cgo_0 [2]byte
+ Pid int32
+ Line [32]int8
+ Id [4]int8
+ User [32]int8
+ Host [256]int8
+ Exit exit_status
+ Session int32
+ Tv timeval
+ Addr_v6 [4]int32
+ X__glibc_reserved [20]int8
+}
+
+type exit_status struct {
+ Termination int16
+ Exit int16
+}
+
+type timeval struct {
+ Sec int64
+ Usec int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go
new file mode 100644
index 000000000..569de4abd
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go
@@ -0,0 +1,105 @@
+//go:build openbsd
+// +build openbsd
+
+package host
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "io/ioutil"
+ "os"
+ "strings"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/process"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ UTNameSize = 32 /* see MAXLOGNAME in */
+ UTLineSize = 8
+ UTHostSize = 16
+)
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ procs, err := process.PidsWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(procs)), nil
+}
+
+func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) {
+ platform := ""
+ family := ""
+ version := ""
+
+ p, err := unix.Sysctl("kern.ostype")
+ if err == nil {
+ platform = strings.ToLower(p)
+ }
+ v, err := unix.Sysctl("kern.osrelease")
+ if err == nil {
+ version = strings.ToLower(v)
+ }
+
+ return platform, family, version, nil
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ var ret []UserStat
+ utmpfile := "/var/run/utmp"
+ file, err := os.Open(utmpfile)
+ if err != nil {
+ return ret, err
+ }
+ defer file.Close()
+
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ return ret, err
+ }
+
+ u := Utmp{}
+ entrySize := int(unsafe.Sizeof(u))
+ count := len(buf) / entrySize
+
+ for i := 0; i < count; i++ {
+ b := buf[i*entrySize : i*entrySize+entrySize]
+ var u Utmp
+ br := bytes.NewReader(b)
+ err := binary.Read(br, binary.LittleEndian, &u)
+ if err != nil || u.Time == 0 || u.Name[0] == 0 {
+ continue
+ }
+ user := UserStat{
+ User: common.IntToString(u.Name[:]),
+ Terminal: common.IntToString(u.Line[:]),
+ Host: common.IntToString(u.Host[:]),
+ Started: int(u.Time),
+ }
+
+ ret = append(ret, user)
+ }
+
+ return ret, nil
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ return []TemperatureStat{}, common.ErrNotImplementedError
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ _, _, version, err := PlatformInformationWithContext(ctx)
+ return version, err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go
new file mode 100644
index 000000000..b299d7ae4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go
@@ -0,0 +1,34 @@
+//go:build openbsd && 386
+// +build openbsd,386
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs host/types_openbsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x130
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [32]int8
+ Host [256]int8
+ Time int64
+}
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go
new file mode 100644
index 000000000..2d23b9b71
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go
@@ -0,0 +1,32 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_openbsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x130
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [32]int8
+ Host [256]int8
+ Time int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go
new file mode 100644
index 000000000..f0ac57d0b
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go
@@ -0,0 +1,34 @@
+//go:build openbsd && arm
+// +build openbsd,arm
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs host/types_openbsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x130
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [32]int8
+ Host [256]int8
+ Time int64
+}
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go
new file mode 100644
index 000000000..20fb42dd7
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go
@@ -0,0 +1,34 @@
+//go:build openbsd && arm64
+// +build openbsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs host/types_openbsd.go
+
+package host
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ sizeOfUtmp = 0x130
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Utmp struct {
+ Line [8]int8
+ Name [32]int8
+ Host [256]int8
+ Time int64
+}
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go
new file mode 100644
index 000000000..89e63781e
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go
@@ -0,0 +1,16 @@
+//go:build linux || freebsd || openbsd || darwin || solaris
+// +build linux freebsd openbsd darwin solaris
+
+package host
+
+import (
+ "bytes"
+
+ "golang.org/x/sys/unix"
+)
+
+func KernelArch() (string, error) {
+ var utsname unix.Utsname
+ err := unix.Uname(&utsname)
+ return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go
new file mode 100644
index 000000000..7d3625acb
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go
@@ -0,0 +1,202 @@
+package host
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/csv"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ platform, err := parseReleaseFile()
+ if err != nil {
+ return "", err
+ }
+
+ if platform == "SmartOS" {
+ // If everything works, use the current zone ID as the HostID if present.
+ out, err := invoke.CommandWithContext(ctx, "zonename")
+ if err == nil {
+ sc := bufio.NewScanner(bytes.NewReader(out))
+ for sc.Scan() {
+ line := sc.Text()
+
+ // If we're in the global zone, rely on the hostname.
+ if line == "global" {
+ hostname, err := os.Hostname()
+ if err == nil {
+ return hostname, nil
+ }
+ } else {
+ return strings.TrimSpace(line), nil
+ }
+ }
+ }
+ }
+
+ // If HostID is still unknown, use hostid(1), which can lie to callers but at
+ // this point there are no hardware facilities available. This behavior
+ // matches that of other supported OSes.
+ out, err := invoke.CommandWithContext(ctx, "hostid")
+ if err == nil {
+ sc := bufio.NewScanner(bytes.NewReader(out))
+ for sc.Scan() {
+ line := sc.Text()
+ return strings.TrimSpace(line), nil
+ }
+ }
+
+ return "", nil
+}
+
+// Count number of processes based on the number of entries in /proc
+func numProcs(ctx context.Context) (uint64, error) {
+ dirs, err := ioutil.ReadDir("/proc")
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(dirs)), nil
+}
+
+var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`)
+
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+ out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "unix:0:system_misc:boot_time")
+ if err != nil {
+ return 0, err
+ }
+
+ kstats := kstatMatch.FindAllStringSubmatch(string(out), -1)
+ if len(kstats) != 1 {
+ return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats))
+ }
+
+ return strconv.ParseUint(kstats[0][2], 10, 64)
+}
+
+func UptimeWithContext(ctx context.Context) (uint64, error) {
+ bootTime, err := BootTime()
+ if err != nil {
+ return 0, err
+ }
+ return timeSince(bootTime), nil
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ return []UserStat{}, common.ErrNotImplementedError
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ var ret []TemperatureStat
+
+ out, err := invoke.CommandWithContext(ctx, "ipmitool", "-c", "sdr", "list")
+ if err != nil {
+ return ret, err
+ }
+
+ r := csv.NewReader(strings.NewReader(string(out)))
+ // Output may contain errors, e.g. "bmc_send_cmd: Permission denied", don't expect a consistent number of records
+ r.FieldsPerRecord = -1
+ for {
+ record, err := r.Read()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return ret, err
+ }
+ // CPU1 Temp,40,degrees C,ok
+ if len(record) < 3 || record[1] == "" || record[2] != "degrees C" {
+ continue
+ }
+ v, err := strconv.ParseFloat(record[1], 64)
+ if err != nil {
+ return ret, err
+ }
+ ts := TemperatureStat{
+ SensorKey: strings.TrimSuffix(record[0], " Temp"),
+ Temperature: v,
+ }
+ ret = append(ret, ts)
+ }
+
+ return ret, nil
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+// Find distribution name from /etc/release
+func parseReleaseFile() (string, error) {
+ b, err := ioutil.ReadFile("/etc/release")
+ if err != nil {
+ return "", err
+ }
+ s := string(b)
+ s = strings.TrimSpace(s)
+
+ var platform string
+
+ switch {
+ case strings.HasPrefix(s, "SmartOS"):
+ platform = "SmartOS"
+ case strings.HasPrefix(s, "OpenIndiana"):
+ platform = "OpenIndiana"
+ case strings.HasPrefix(s, "OmniOS"):
+ platform = "OmniOS"
+ case strings.HasPrefix(s, "Open Storage"):
+ platform = "NexentaStor"
+ case strings.HasPrefix(s, "Solaris"):
+ platform = "Solaris"
+ case strings.HasPrefix(s, "Oracle Solaris"):
+ platform = "Solaris"
+ default:
+ platform = strings.Fields(s)[0]
+ }
+
+ return platform, nil
+}
+
+// parseUnameOutput returns platformFamily, kernelVersion and platformVersion
+func parseUnameOutput(ctx context.Context) (string, string, string, error) {
+ out, err := invoke.CommandWithContext(ctx, "uname", "-srv")
+ if err != nil {
+ return "", "", "", err
+ }
+
+ fields := strings.Fields(string(out))
+ if len(fields) < 3 {
+ return "", "", "", fmt.Errorf("malformed `uname` output")
+ }
+
+ return fields[0], fields[1], fields[2], nil
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ _, kernelVersion, _, err := parseUnameOutput(ctx)
+ return kernelVersion, err
+}
+
+func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) {
+ platform, err := parseReleaseFile()
+ if err != nil {
+ return "", "", "", err
+ }
+
+ platformFamily, _, platformVersion, err := parseUnameOutput(ctx)
+ if err != nil {
+ return "", "", "", err
+ }
+
+ return platform, platformFamily, platformVersion, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go
new file mode 100644
index 000000000..fcd1d5908
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go
@@ -0,0 +1,279 @@
+//go:build windows
+// +build windows
+
+package host
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/process"
+ "github.com/yusufpapurcu/wmi"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ procGetSystemTimeAsFileTime = common.Modkernel32.NewProc("GetSystemTimeAsFileTime")
+ procGetTickCount32 = common.Modkernel32.NewProc("GetTickCount")
+ procGetTickCount64 = common.Modkernel32.NewProc("GetTickCount64")
+ procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo")
+ procRtlGetVersion = common.ModNt.NewProc("RtlGetVersion")
+)
+
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw
+type osVersionInfoExW struct {
+ dwOSVersionInfoSize uint32
+ dwMajorVersion uint32
+ dwMinorVersion uint32
+ dwBuildNumber uint32
+ dwPlatformId uint32
+ szCSDVersion [128]uint16
+ wServicePackMajor uint16
+ wServicePackMinor uint16
+ wSuiteMask uint16
+ wProductType uint8
+ wReserved uint8
+}
+
+type systemInfo struct {
+ wProcessorArchitecture uint16
+ wReserved uint16
+ dwPageSize uint32
+ lpMinimumApplicationAddress uintptr
+ lpMaximumApplicationAddress uintptr
+ dwActiveProcessorMask uintptr
+ dwNumberOfProcessors uint32
+ dwProcessorType uint32
+ dwAllocationGranularity uint32
+ wProcessorLevel uint16
+ wProcessorRevision uint16
+}
+
+type msAcpi_ThermalZoneTemperature struct {
+ Active bool
+ CriticalTripPoint uint32
+ CurrentTemperature uint32
+ InstanceName string
+}
+
+func HostIDWithContext(ctx context.Context) (string, error) {
+ // there has been reports of issues on 32bit using golang.org/x/sys/windows/registry, see https://github.com/shirou/gopsutil/pull/312#issuecomment-277422612
+ // for rationale of using windows.RegOpenKeyEx/RegQueryValueEx instead of registry.OpenKey/GetStringValue
+ var h windows.Handle
+ err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h)
+ if err != nil {
+ return "", err
+ }
+ defer windows.RegCloseKey(h)
+
+ const windowsRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16
+ const uuidLen = 36
+
+ var regBuf [windowsRegBufLen]uint16
+ bufLen := uint32(windowsRegBufLen)
+ var valType uint32
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+ if err != nil {
+ return "", err
+ }
+
+ hostID := windows.UTF16ToString(regBuf[:])
+ hostIDLen := len(hostID)
+ if hostIDLen != uuidLen {
+ return "", fmt.Errorf("HostID incorrect: %q\n", hostID)
+ }
+
+ return strings.ToLower(hostID), nil
+}
+
+func numProcs(ctx context.Context) (uint64, error) {
+ procs, err := process.PidsWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(procs)), nil
+}
+
+func UptimeWithContext(ctx context.Context) (uint64, error) {
+ procGetTickCount := procGetTickCount64
+ err := procGetTickCount64.Find()
+ if err != nil {
+ procGetTickCount = procGetTickCount32 // handle WinXP, but keep in mind that "the time will wrap around to zero if the system is run continuously for 49.7 days." from MSDN
+ }
+ r1, _, lastErr := syscall.Syscall(procGetTickCount.Addr(), 0, 0, 0, 0)
+ if lastErr != 0 {
+ return 0, lastErr
+ }
+ return uint64((time.Duration(r1) * time.Millisecond).Seconds()), nil
+}
+
+// cachedBootTime must be accessed via atomic.Load/StoreUint64
+var cachedBootTime uint64
+
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+ t := atomic.LoadUint64(&cachedBootTime)
+ if t != 0 {
+ return t, nil
+ }
+ up, err := Uptime()
+ if err != nil {
+ return 0, err
+ }
+ t = timeSince(up)
+ atomic.StoreUint64(&cachedBootTime, t)
+ return t, nil
+}
+
+func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) {
+ // GetVersionEx lies on Windows 8.1 and returns as Windows 8 if we don't declare compatibility in manifest
+ // RtlGetVersion bypasses this lying layer and returns the true Windows version
+ // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/nf-wdm-rtlgetversion
+ // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw
+ var osInfo osVersionInfoExW
+ osInfo.dwOSVersionInfoSize = uint32(unsafe.Sizeof(osInfo))
+ ret, _, err := procRtlGetVersion.Call(uintptr(unsafe.Pointer(&osInfo)))
+ if ret != 0 {
+ return
+ }
+
+ // Platform
+ var h windows.Handle // like HostIDWithContext(), we query the registry using the raw windows.RegOpenKeyEx/RegQueryValueEx
+ err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h)
+ if err != nil {
+ return
+ }
+ defer windows.RegCloseKey(h)
+ var bufLen uint32
+ var valType uint32
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, nil, &bufLen)
+ if err != nil {
+ return
+ }
+ regBuf := make([]uint16, bufLen/2+1)
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+ if err != nil {
+ return
+ }
+ platform = windows.UTF16ToString(regBuf[:])
+ if strings.Contains(platform, "Windows 10") { // check build number to determine whether it's actually Windows 11
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CurrentBuildNumber`), nil, &valType, nil, &bufLen)
+ if err == nil {
+ regBuf = make([]uint16, bufLen/2+1)
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CurrentBuildNumber`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+ if err == nil {
+ buildNumberStr := windows.UTF16ToString(regBuf[:])
+ if buildNumber, err := strconv.Atoi(buildNumberStr); err == nil && buildNumber >= 22000 {
+ platform = strings.Replace(platform, "Windows 10", "Windows 11", 1)
+ }
+ }
+ }
+ }
+ if !strings.HasPrefix(platform, "Microsoft") {
+ platform = "Microsoft " + platform
+ }
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, nil, &bufLen) // append Service Pack number, only on success
+ if err == nil { // don't return an error if only the Service Pack retrieval fails
+ regBuf = make([]uint16, bufLen/2+1)
+ err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+ if err == nil {
+ platform += " " + windows.UTF16ToString(regBuf[:])
+ }
+ }
+
+ // PlatformFamily
+ switch osInfo.wProductType {
+ case 1:
+ family = "Standalone Workstation"
+ case 2:
+ family = "Server (Domain Controller)"
+ case 3:
+ family = "Server"
+ }
+
+ // Platform Version
+ version = fmt.Sprintf("%d.%d.%d Build %d", osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, osInfo.dwBuildNumber)
+
+ return platform, family, version, nil
+}
+
+func UsersWithContext(ctx context.Context) ([]UserStat, error) {
+ var ret []UserStat
+
+ return ret, common.ErrNotImplementedError
+}
+
+func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) {
+ var ret []TemperatureStat
+ var dst []msAcpi_ThermalZoneTemperature
+ q := wmi.CreateQuery(&dst, "")
+ if err := common.WMIQueryWithContext(ctx, q, &dst, nil, "root/wmi"); err != nil {
+ return ret, err
+ }
+
+ for _, v := range dst {
+ ts := TemperatureStat{
+ SensorKey: v.InstanceName,
+ Temperature: kelvinToCelsius(v.CurrentTemperature, 2),
+ }
+ ret = append(ret, ts)
+ }
+
+ return ret, nil
+}
+
+func kelvinToCelsius(temp uint32, n int) float64 {
+ // wmi return temperature Kelvin * 10, so need to divide the result by 10,
+ // and then minus 273.15 to get °Celsius.
+ t := float64(temp/10) - 273.15
+ n10 := math.Pow10(n)
+ return math.Trunc((t+0.5/n10)*n10) / n10
+}
+
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+ return "", "", common.ErrNotImplementedError
+}
+
+func KernelVersionWithContext(ctx context.Context) (string, error) {
+ _, _, version, err := PlatformInformationWithContext(ctx)
+ return version, err
+}
+
+func KernelArch() (string, error) {
+ var systemInfo systemInfo
+ procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo)))
+
+ const (
+ PROCESSOR_ARCHITECTURE_INTEL = 0
+ PROCESSOR_ARCHITECTURE_ARM = 5
+ PROCESSOR_ARCHITECTURE_ARM64 = 12
+ PROCESSOR_ARCHITECTURE_IA64 = 6
+ PROCESSOR_ARCHITECTURE_AMD64 = 9
+ )
+ switch systemInfo.wProcessorArchitecture {
+ case PROCESSOR_ARCHITECTURE_INTEL:
+ if systemInfo.wProcessorLevel < 3 {
+ return "i386", nil
+ }
+ if systemInfo.wProcessorLevel > 6 {
+ return "i686", nil
+ }
+ return fmt.Sprintf("i%d86", systemInfo.wProcessorLevel), nil
+ case PROCESSOR_ARCHITECTURE_ARM:
+ return "arm", nil
+ case PROCESSOR_ARCHITECTURE_ARM64:
+ return "aarch64", nil
+ case PROCESSOR_ARCHITECTURE_IA64:
+ return "ia64", nil
+ case PROCESSOR_ARCHITECTURE_AMD64:
+ return "x86_64", nil
+ }
+ return "", nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c
new file mode 100644
index 000000000..0197d95b4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c
@@ -0,0 +1,169 @@
+#include
+#include
+#include "smc_darwin.h"
+
+#define IOSERVICE_SMC "AppleSMC"
+#define IOSERVICE_MODEL "IOPlatformExpertDevice"
+
+#define DATA_TYPE_SP78 "sp78"
+
+typedef enum {
+ kSMCUserClientOpen = 0,
+ kSMCUserClientClose = 1,
+ kSMCHandleYPCEvent = 2,
+ kSMCReadKey = 5,
+ kSMCWriteKey = 6,
+ kSMCGetKeyCount = 7,
+ kSMCGetKeyFromIndex = 8,
+ kSMCGetKeyInfo = 9,
+} selector_t;
+
+typedef struct {
+ unsigned char major;
+ unsigned char minor;
+ unsigned char build;
+ unsigned char reserved;
+ unsigned short release;
+} SMCVersion;
+
+typedef struct {
+ uint16_t version;
+ uint16_t length;
+ uint32_t cpuPLimit;
+ uint32_t gpuPLimit;
+ uint32_t memPLimit;
+} SMCPLimitData;
+
+typedef struct {
+ IOByteCount data_size;
+ uint32_t data_type;
+ uint8_t data_attributes;
+} SMCKeyInfoData;
+
+typedef struct {
+ uint32_t key;
+ SMCVersion vers;
+ SMCPLimitData p_limit_data;
+ SMCKeyInfoData key_info;
+ uint8_t result;
+ uint8_t status;
+ uint8_t data8;
+ uint32_t data32;
+ uint8_t bytes[32];
+} SMCParamStruct;
+
+typedef enum {
+ kSMCSuccess = 0,
+ kSMCError = 1,
+ kSMCKeyNotFound = 0x84,
+} kSMC_t;
+
+typedef struct {
+ uint8_t data[32];
+ uint32_t data_type;
+ uint32_t data_size;
+ kSMC_t kSMC;
+} smc_return_t;
+
+static const int SMC_KEY_SIZE = 4; // number of characters in an SMC key.
+static io_connect_t conn; // our connection to the SMC.
+
+kern_return_t gopsutil_v3_open_smc(void) {
+ kern_return_t result;
+ io_service_t service;
+
+ service = IOServiceGetMatchingService(0, IOServiceMatching(IOSERVICE_SMC));
+ if (service == 0) {
+ // Note: IOServiceMatching documents 0 on failure
+ printf("ERROR: %s NOT FOUND\n", IOSERVICE_SMC);
+ return kIOReturnError;
+ }
+
+ result = IOServiceOpen(service, mach_task_self(), 0, &conn);
+ IOObjectRelease(service);
+
+ return result;
+}
+
+kern_return_t gopsutil_v3_close_smc(void) { return IOServiceClose(conn); }
+
+static uint32_t to_uint32(char *key) {
+ uint32_t ans = 0;
+ uint32_t shift = 24;
+
+ if (strlen(key) != SMC_KEY_SIZE) {
+ return 0;
+ }
+
+ for (int i = 0; i < SMC_KEY_SIZE; i++) {
+ ans += key[i] << shift;
+ shift -= 8;
+ }
+
+ return ans;
+}
+
+static kern_return_t call_smc(SMCParamStruct *input, SMCParamStruct *output) {
+ kern_return_t result;
+ size_t input_cnt = sizeof(SMCParamStruct);
+ size_t output_cnt = sizeof(SMCParamStruct);
+
+ result = IOConnectCallStructMethod(conn, kSMCHandleYPCEvent, input, input_cnt,
+ output, &output_cnt);
+
+ if (result != kIOReturnSuccess) {
+ result = err_get_code(result);
+ }
+ return result;
+}
+
+static kern_return_t read_smc(char *key, smc_return_t *result_smc) {
+ kern_return_t result;
+ SMCParamStruct input;
+ SMCParamStruct output;
+
+ memset(&input, 0, sizeof(SMCParamStruct));
+ memset(&output, 0, sizeof(SMCParamStruct));
+ memset(result_smc, 0, sizeof(smc_return_t));
+
+ input.key = to_uint32(key);
+ input.data8 = kSMCGetKeyInfo;
+
+ result = call_smc(&input, &output);
+ result_smc->kSMC = output.result;
+
+ if (result != kIOReturnSuccess || output.result != kSMCSuccess) {
+ return result;
+ }
+
+ result_smc->data_size = output.key_info.data_size;
+ result_smc->data_type = output.key_info.data_type;
+
+ input.key_info.data_size = output.key_info.data_size;
+ input.data8 = kSMCReadKey;
+
+ result = call_smc(&input, &output);
+ result_smc->kSMC = output.result;
+
+ if (result != kIOReturnSuccess || output.result != kSMCSuccess) {
+ return result;
+ }
+
+ memcpy(result_smc->data, output.bytes, sizeof(output.bytes));
+
+ return result;
+}
+
+double gopsutil_v3_get_temperature(char *key) {
+ kern_return_t result;
+ smc_return_t result_smc;
+
+ result = read_smc(key, &result_smc);
+
+ if (!(result == kIOReturnSuccess) && result_smc.data_size == 2 &&
+ result_smc.data_type == to_uint32(DATA_TYPE_SP78)) {
+ return 0.0;
+ }
+
+ return (double)result_smc.data[0];
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h
new file mode 100644
index 000000000..e3013abdb
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h
@@ -0,0 +1,32 @@
+#ifndef __SMC_H__
+#define __SMC_H__ 1
+
+#include
+
+#define AMBIENT_AIR_0 "TA0P"
+#define AMBIENT_AIR_1 "TA1P"
+#define CPU_0_DIODE "TC0D"
+#define CPU_0_HEATSINK "TC0H"
+#define CPU_0_PROXIMITY "TC0P"
+#define ENCLOSURE_BASE_0 "TB0T"
+#define ENCLOSURE_BASE_1 "TB1T"
+#define ENCLOSURE_BASE_2 "TB2T"
+#define ENCLOSURE_BASE_3 "TB3T"
+#define GPU_0_DIODE "TG0D"
+#define GPU_0_HEATSINK "TG0H"
+#define GPU_0_PROXIMITY "TG0P"
+#define HARD_DRIVE_BAY "TH0P"
+#define MEMORY_SLOT_0 "TM0S"
+#define MEMORY_SLOTS_PROXIMITY "TM0P"
+#define NORTHBRIDGE "TN0H"
+#define NORTHBRIDGE_DIODE "TN0D"
+#define NORTHBRIDGE_PROXIMITY "TN0P"
+#define THUNDERBOLT_0 "TI0P"
+#define THUNDERBOLT_1 "TI1P"
+#define WIRELESS_MODULE "TW0P"
+
+kern_return_t gopsutil_v3_open_smc(void);
+kern_return_t gopsutil_v3_close_smc(void);
+double gopsutil_v3_get_temperature(char *);
+
+#endif // __SMC_H__
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/host/types.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/types.go
new file mode 100644
index 000000000..c2e7c0bda
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/host/types.go
@@ -0,0 +1,24 @@
+package host
+
+import (
+ "fmt"
+)
+
+type Warnings struct {
+ List []error
+}
+
+func (w *Warnings) Add(err error) {
+ w.List = append(w.List, err)
+}
+
+func (w *Warnings) Reference() error {
+ if len(w.List) > 0 {
+ return w
+ }
+ return nil
+}
+
+func (w *Warnings) Error() string {
+ return fmt.Sprintf("Number of warnings: %v", len(w.List))
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go
new file mode 100644
index 000000000..446c3597f
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go
@@ -0,0 +1,636 @@
+package common
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package binary implements simple translation between numbers and byte
+// sequences and encoding and decoding of varints.
+//
+// Numbers are translated by reading and writing fixed-size values.
+// A fixed-size value is either a fixed-size arithmetic
+// type (int8, uint8, int16, float32, complex64, ...)
+// or an array or struct containing only fixed-size values.
+//
+// The varint functions encode and decode single integer values using
+// a variable-length encoding; smaller values require fewer bytes.
+// For a specification, see
+// http://code.google.com/apis/protocolbuffers/docs/encoding.html.
+//
+// This package favors simplicity over efficiency. Clients that require
+// high-performance serialization, especially for large data structures,
+// should look at more advanced solutions such as the encoding/gob
+// package or protocol buffers.
+import (
+ "errors"
+ "io"
+ "math"
+ "reflect"
+)
+
+// A ByteOrder specifies how to convert byte sequences into
+// 16-, 32-, or 64-bit unsigned integers.
+type ByteOrder interface {
+ Uint16([]byte) uint16
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+ PutUint16([]byte, uint16)
+ PutUint32([]byte, uint32)
+ PutUint64([]byte, uint64)
+ String() string
+}
+
+// LittleEndian is the little-endian implementation of ByteOrder.
+var LittleEndian littleEndian
+
+// BigEndian is the big-endian implementation of ByteOrder.
+var BigEndian bigEndian
+
+type littleEndian struct{}
+
+func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 }
+
+func (littleEndian) PutUint16(b []byte, v uint16) {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) PutUint32(b []byte, v uint32) {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (littleEndian) PutUint64(b []byte, v uint64) {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+}
+
+func (littleEndian) String() string { return "LittleEndian" }
+
+func (littleEndian) GoString() string { return "binary.LittleEndian" }
+
+type bigEndian struct{}
+
+func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 }
+
+func (bigEndian) PutUint16(b []byte, v uint16) {
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) PutUint32(b []byte, v uint32) {
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func (bigEndian) PutUint64(b []byte, v uint64) {
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+}
+
+func (bigEndian) String() string { return "BigEndian" }
+
+func (bigEndian) GoString() string { return "binary.BigEndian" }
+
+// Read reads structured binary data from r into data.
+// Data must be a pointer to a fixed-size value or a slice
+// of fixed-size values.
+// Bytes read from r are decoded using the specified byte order
+// and written to successive fields of the data.
+// When reading into structs, the field data for fields with
+// blank (_) field names is skipped; i.e., blank field names
+// may be used for padding.
+// When reading into a struct, all non-blank fields must be exported.
+func Read(r io.Reader, order ByteOrder, data interface{}) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ var b [8]byte
+ var bs []byte
+ if n > len(b) {
+ bs = make([]byte, n)
+ } else {
+ bs = b[:n]
+ }
+ if _, err := io.ReadFull(r, bs); err != nil {
+ return err
+ }
+ switch data := data.(type) {
+ case *int8:
+ *data = int8(b[0])
+ case *uint8:
+ *data = b[0]
+ case *int16:
+ *data = int16(order.Uint16(bs))
+ case *uint16:
+ *data = order.Uint16(bs)
+ case *int32:
+ *data = int32(order.Uint32(bs))
+ case *uint32:
+ *data = order.Uint32(bs)
+ case *int64:
+ *data = int64(order.Uint64(bs))
+ case *uint64:
+ *data = order.Uint64(bs)
+ case []int8:
+ for i, x := range bs { // Easier to loop over the input for 8-bit values.
+ data[i] = int8(x)
+ }
+ case []uint8:
+ copy(data, bs)
+ case []int16:
+ for i := range data {
+ data[i] = int16(order.Uint16(bs[2*i:]))
+ }
+ case []uint16:
+ for i := range data {
+ data[i] = order.Uint16(bs[2*i:])
+ }
+ case []int32:
+ for i := range data {
+ data[i] = int32(order.Uint32(bs[4*i:]))
+ }
+ case []uint32:
+ for i := range data {
+ data[i] = order.Uint32(bs[4*i:])
+ }
+ case []int64:
+ for i := range data {
+ data[i] = int64(order.Uint64(bs[8*i:]))
+ }
+ case []uint64:
+ for i := range data {
+ data[i] = order.Uint64(bs[8*i:])
+ }
+ }
+ return nil
+ }
+
+ // Fallback to reflect-based decoding.
+ v := reflect.ValueOf(data)
+ size := -1
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ size = dataSize(v)
+ case reflect.Slice:
+ size = dataSize(v)
+ }
+ if size < 0 {
+ return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
+ }
+ d := &decoder{order: order, buf: make([]byte, size)}
+ if _, err := io.ReadFull(r, d.buf); err != nil {
+ return err
+ }
+ d.value(v)
+ return nil
+}
+
+// Write writes the binary representation of data into w.
+// Data must be a fixed-size value or a slice of fixed-size
+// values, or a pointer to such data.
+// Bytes written to w are encoded using the specified byte order
+// and read from successive fields of the data.
+// When writing structs, zero values are written for fields
+// with blank (_) field names.
+func Write(w io.Writer, order ByteOrder, data interface{}) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ var b [8]byte
+ var bs []byte
+ if n > len(b) {
+ bs = make([]byte, n)
+ } else {
+ bs = b[:n]
+ }
+ switch v := data.(type) {
+ case *int8:
+ bs = b[:1]
+ b[0] = byte(*v)
+ case int8:
+ bs = b[:1]
+ b[0] = byte(v)
+ case []int8:
+ for i, x := range v {
+ bs[i] = byte(x)
+ }
+ case *uint8:
+ bs = b[:1]
+ b[0] = *v
+ case uint8:
+ bs = b[:1]
+ b[0] = byte(v)
+ case []uint8:
+ bs = v
+ case *int16:
+ bs = b[:2]
+ order.PutUint16(bs, uint16(*v))
+ case int16:
+ bs = b[:2]
+ order.PutUint16(bs, uint16(v))
+ case []int16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], uint16(x))
+ }
+ case *uint16:
+ bs = b[:2]
+ order.PutUint16(bs, *v)
+ case uint16:
+ bs = b[:2]
+ order.PutUint16(bs, v)
+ case []uint16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], x)
+ }
+ case *int32:
+ bs = b[:4]
+ order.PutUint32(bs, uint32(*v))
+ case int32:
+ bs = b[:4]
+ order.PutUint32(bs, uint32(v))
+ case []int32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], uint32(x))
+ }
+ case *uint32:
+ bs = b[:4]
+ order.PutUint32(bs, *v)
+ case uint32:
+ bs = b[:4]
+ order.PutUint32(bs, v)
+ case []uint32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], x)
+ }
+ case *int64:
+ bs = b[:8]
+ order.PutUint64(bs, uint64(*v))
+ case int64:
+ bs = b[:8]
+ order.PutUint64(bs, uint64(v))
+ case []int64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], uint64(x))
+ }
+ case *uint64:
+ bs = b[:8]
+ order.PutUint64(bs, *v)
+ case uint64:
+ bs = b[:8]
+ order.PutUint64(bs, v)
+ case []uint64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], x)
+ }
+ }
+ _, err := w.Write(bs)
+ return err
+ }
+
+ // Fallback to reflect-based encoding.
+ v := reflect.Indirect(reflect.ValueOf(data))
+ size := dataSize(v)
+ if size < 0 {
+ return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String())
+ }
+ buf := make([]byte, size)
+ e := &encoder{order: order, buf: buf}
+ e.value(v)
+ _, err := w.Write(buf)
+ return err
+}
+
+// Size returns how many bytes Write would generate to encode the value v, which
+// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
+// If v is neither of these, Size returns -1.
+func Size(v interface{}) int {
+	return dataSize(reflect.Indirect(reflect.ValueOf(v)))
+}
+
+// dataSize returns the number of bytes the actual data represented by v occupies in memory.
+// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
+// it returns the length of the slice times the element size and does not count the memory
+// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
+func dataSize(v reflect.Value) int {
+	// Slices are sized per element; every other kind is delegated to sizeof.
+	if v.Kind() == reflect.Slice {
+		if s := sizeof(v.Type().Elem()); s >= 0 {
+			return s * v.Len()
+		}
+		return -1
+	}
+	return sizeof(v.Type())
+}
+
+// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
+func sizeof(t reflect.Type) int {
+	switch t.Kind() {
+	case reflect.Array:
+		// An array of an unacceptable element type falls through to the
+		// final "return -1" below.
+		if s := sizeof(t.Elem()); s >= 0 {
+			return s * t.Len()
+		}
+
+	case reflect.Struct:
+		// A struct is acceptable only if every field is; field sizes are
+		// summed with no padding (packed wire format, not Go layout).
+		sum := 0
+		for i, n := 0, t.NumField(); i < n; i++ {
+			s := sizeof(t.Field(i).Type)
+			if s < 0 {
+				return -1
+			}
+			sum += s
+		}
+		return sum
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr:
+		// NOTE(review): unlike the standard library's encoding/binary, this
+		// copy also accepts reflect.Ptr, sized at the machine pointer width.
+		return int(t.Size())
+	}
+
+	return -1
+}
+
+// coder holds the shared state of the buffer-based encoder/decoder: a byte
+// order plus a cursor-style buffer that is re-sliced forward as values are
+// consumed or produced.
+type coder struct {
+	order ByteOrder
+	buf   []byte
+}
+
+type (
+	decoder coder
+	encoder coder
+)
+
+// The fixed-width helpers below read or write one value at the front of
+// buf and then advance it, so buf always points at the next unprocessed
+// byte. They index without bounds checks of their own; callers size the
+// buffer up front via dataSize.
+
+func (d *decoder) uint8() uint8 {
+	x := d.buf[0]
+	d.buf = d.buf[1:]
+	return x
+}
+
+func (e *encoder) uint8(x uint8) {
+	e.buf[0] = x
+	e.buf = e.buf[1:]
+}
+
+func (d *decoder) uint16() uint16 {
+	x := d.order.Uint16(d.buf[0:2])
+	d.buf = d.buf[2:]
+	return x
+}
+
+func (e *encoder) uint16(x uint16) {
+	e.order.PutUint16(e.buf[0:2], x)
+	e.buf = e.buf[2:]
+}
+
+func (d *decoder) uint32() uint32 {
+	x := d.order.Uint32(d.buf[0:4])
+	d.buf = d.buf[4:]
+	return x
+}
+
+func (e *encoder) uint32(x uint32) {
+	e.order.PutUint32(e.buf[0:4], x)
+	e.buf = e.buf[4:]
+}
+
+func (d *decoder) uint64() uint64 {
+	x := d.order.Uint64(d.buf[0:8])
+	d.buf = d.buf[8:]
+	return x
+}
+
+func (e *encoder) uint64(x uint64) {
+	e.order.PutUint64(e.buf[0:8], x)
+	e.buf = e.buf[8:]
+}
+
+// The signed variants just reinterpret the unsigned bit patterns.
+
+func (d *decoder) int8() int8 { return int8(d.uint8()) }
+
+func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
+
+func (d *decoder) int16() int16 { return int16(d.uint16()) }
+
+func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
+
+func (d *decoder) int32() int32 { return int32(d.uint32()) }
+
+func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
+
+func (d *decoder) int64() int64 { return int64(d.uint64()) }
+
+func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
+
+// value decodes the next bytes of d.buf into v, recursing through arrays,
+// structs and slices down to the fixed-size scalar kinds. Kinds that sizeof
+// rejects never reach here, so the switch has no default case.
+func (d *decoder) value(v reflect.Value) {
+	switch v.Kind() {
+	case reflect.Array:
+		l := v.Len()
+		for i := 0; i < l; i++ {
+			d.value(v.Index(i))
+		}
+
+	case reflect.Struct:
+		t := v.Type()
+		l := v.NumField()
+		for i := 0; i < l; i++ {
+			// Note: Calling v.CanSet() below is an optimization.
+			// It would be sufficient to check the field name,
+			// but creating the StructField info for each field is
+			// costly (run "go test -bench=ReadStruct" and compare
+			// results when making changes to this code).
+			// Only blank ("_") fields take the skip branch: their
+			// bytes are consumed without being stored.
+			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+				d.value(v)
+			} else {
+				d.skip(v)
+			}
+		}
+
+	case reflect.Slice:
+		l := v.Len()
+		for i := 0; i < l; i++ {
+			d.value(v.Index(i))
+		}
+
+	case reflect.Int8:
+		v.SetInt(int64(d.int8()))
+	case reflect.Int16:
+		v.SetInt(int64(d.int16()))
+	case reflect.Int32:
+		v.SetInt(int64(d.int32()))
+	case reflect.Int64:
+		v.SetInt(d.int64())
+
+	case reflect.Uint8:
+		v.SetUint(uint64(d.uint8()))
+	case reflect.Uint16:
+		v.SetUint(uint64(d.uint16()))
+	case reflect.Uint32:
+		v.SetUint(uint64(d.uint32()))
+	case reflect.Uint64:
+		v.SetUint(d.uint64())
+
+	case reflect.Float32:
+		v.SetFloat(float64(math.Float32frombits(d.uint32())))
+	case reflect.Float64:
+		v.SetFloat(math.Float64frombits(d.uint64()))
+
+	case reflect.Complex64:
+		// Complex values are encoded real part first, imaginary second.
+		v.SetComplex(complex(
+			float64(math.Float32frombits(d.uint32())),
+			float64(math.Float32frombits(d.uint32())),
+		))
+	case reflect.Complex128:
+		v.SetComplex(complex(
+			math.Float64frombits(d.uint64()),
+			math.Float64frombits(d.uint64()),
+		))
+	}
+}
+
+// value encodes v into e.buf, mirroring decoder.value. The Int/Uint/Float/
+// Complex cases re-switch on the concrete kind; kinds listed in the outer
+// case but absent from the inner switch (Int, Uint, Uintptr) write nothing —
+// sizeof rejects them before encoding starts, so they are unreachable here.
+func (e *encoder) value(v reflect.Value) {
+	switch v.Kind() {
+	case reflect.Array:
+		l := v.Len()
+		for i := 0; i < l; i++ {
+			e.value(v.Index(i))
+		}
+
+	case reflect.Struct:
+		t := v.Type()
+		l := v.NumField()
+		for i := 0; i < l; i++ {
+			// see comment for corresponding code in decoder.value()
+			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+				e.value(v)
+			} else {
+				e.skip(v)
+			}
+		}
+
+	case reflect.Slice:
+		l := v.Len()
+		for i := 0; i < l; i++ {
+			e.value(v.Index(i))
+		}
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Type().Kind() {
+		case reflect.Int8:
+			e.int8(int8(v.Int()))
+		case reflect.Int16:
+			e.int16(int16(v.Int()))
+		case reflect.Int32:
+			e.int32(int32(v.Int()))
+		case reflect.Int64:
+			e.int64(v.Int())
+		}
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch v.Type().Kind() {
+		case reflect.Uint8:
+			e.uint8(uint8(v.Uint()))
+		case reflect.Uint16:
+			e.uint16(uint16(v.Uint()))
+		case reflect.Uint32:
+			e.uint32(uint32(v.Uint()))
+		case reflect.Uint64:
+			e.uint64(v.Uint())
+		}
+
+	case reflect.Float32, reflect.Float64:
+		switch v.Type().Kind() {
+		case reflect.Float32:
+			e.uint32(math.Float32bits(float32(v.Float())))
+		case reflect.Float64:
+			e.uint64(math.Float64bits(v.Float()))
+		}
+
+	case reflect.Complex64, reflect.Complex128:
+		// Real part first, imaginary second, matching decoder.value.
+		switch v.Type().Kind() {
+		case reflect.Complex64:
+			x := v.Complex()
+			e.uint32(math.Float32bits(float32(real(x))))
+			e.uint32(math.Float32bits(float32(imag(x))))
+		case reflect.Complex128:
+			x := v.Complex()
+			e.uint64(math.Float64bits(real(x)))
+			e.uint64(math.Float64bits(imag(x)))
+		}
+	}
+}
+
+// skip advances the read cursor past the encoded size of v without
+// decoding it (used for blank "_" struct fields).
+func (d *decoder) skip(v reflect.Value) {
+	d.buf = d.buf[dataSize(v):]
+}
+
+// skip zero-fills the next dataSize(v) bytes and advances the write
+// cursor, so skipped fields always encode as zeroes.
+func (e *encoder) skip(v reflect.Value) {
+	n := dataSize(v)
+	for i := range e.buf[0:n] {
+		e.buf[i] = 0
+	}
+	e.buf = e.buf[n:]
+}
+
+// intDataSize returns the size of the data required to represent the data when encoded.
+// It returns zero if the type cannot be implemented by the fast path in Read or Write.
+func intDataSize(data interface{}) int {
+	switch data := data.(type) {
+	case int8, *int8, *uint8:
+		return 1
+	case []int8:
+		return len(data)
+	case []uint8:
+		return len(data)
+	case int16, *int16, *uint16:
+		return 2
+	case []int16:
+		return 2 * len(data)
+	case []uint16:
+		return 2 * len(data)
+	case int32, *int32, *uint32:
+		return 4
+	case []int32:
+		return 4 * len(data)
+	case []uint32:
+		return 4 * len(data)
+	case int64, *int64, *uint64:
+		return 8
+	case []int64:
+		return 8 * len(data)
+	case []uint64:
+		return 8 * len(data)
+	}
+	// Zero means "no fast path"; the caller falls back to reflection.
+	return 0
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go
new file mode 100644
index 000000000..adc4922bd
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go
@@ -0,0 +1,391 @@
+package common
+
+//
+// gopsutil is a port of psutil(http://pythonhosted.org/psutil/).
+// This covers these architectures.
+// - linux (amd64, arm)
+// - freebsd (amd64)
+// - windows (amd64)
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+	// Timeout bounds every Invoke.Command run. ErrTimeout is exported for
+	// callers that report timeouts; it is not returned from this file.
+	Timeout = 3 * time.Second
+	ErrTimeout = errors.New("command timed out")
+)
+
+// Invoker abstracts running an external command so tests can substitute
+// canned output (see FakeInvoke below).
+type Invoker interface {
+	Command(string, ...string) ([]byte, error)
+	CommandWithContext(context.Context, string, ...string) ([]byte, error)
+}
+
+// Invoke is the production Invoker, backed by os/exec.
+type Invoke struct{}
+
+// Command runs name with arg under the package-level Timeout.
+func (i Invoke) Command(name string, arg ...string) ([]byte, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), Timeout)
+	defer cancel()
+	return i.CommandWithContext(ctx, name, arg...)
+}
+
+// CommandWithContext runs name with arg under ctx and returns the combined
+// stdout+stderr output; on failure the partial output is returned with err.
+func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) {
+	cmd := exec.CommandContext(ctx, name, arg...)
+
+	// stdout and stderr are deliberately merged into one buffer.
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	cmd.Stderr = &buf
+
+	if err := cmd.Start(); err != nil {
+		return buf.Bytes(), err
+	}
+
+	if err := cmd.Wait(); err != nil {
+		return buf.Bytes(), err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// FakeInvoke is a test Invoker that serves command output from files under
+// testdata/<GOOS>/ instead of executing anything.
+type FakeInvoke struct {
+	Suffix string // Suffix specifies expected file name suffix such as "fail"
+	Error  error  // If Error specified, return the error.
+}
+
+// Command in FakeInvoke returns from expected file if exists.
+// The expected file name is the command name plus its arguments joined and
+// query-escaped, e.g. testdata/linux/ps-ef[_suffix].
+func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) {
+	if i.Error != nil {
+		return []byte{}, i.Error
+	}
+
+	arch := runtime.GOOS
+
+	commandName := filepath.Base(name)
+
+	fname := strings.Join(append([]string{commandName}, arg...), "")
+	fname = url.QueryEscape(fname)
+	fpath := path.Join("testdata", arch, fname)
+	if i.Suffix != "" {
+		fpath += "_" + i.Suffix
+	}
+	if PathExists(fpath) {
+		return ioutil.ReadFile(fpath)
+	}
+	return []byte{}, fmt.Errorf("could not find testdata: %s", fpath)
+}
+
+// CommandWithContext ignores ctx and defers to Command.
+func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) {
+	return i.Command(name, arg...)
+}
+
+// ErrNotImplementedError is returned by platform-specific functions that
+// have no implementation for the current OS.
+var ErrNotImplementedError = errors.New("not implemented yet")
+
+// ReadFile reads contents from a file and returns them as a string.
+func ReadFile(filename string) (string, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+
+	return string(content), nil
+}
+
+// ReadLines reads contents from a file and splits them by new lines.
+// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
+func ReadLines(filename string) ([]string, error) {
+	return ReadLinesOffsetN(filename, 0, -1)
+}
+
+// ReadLinesOffsetN reads contents from file and splits them by new line.
+// The offset tells at which line number to start.
+// The count determines the number of lines to read (starting from offset):
+// n >= 0: at most n lines
+// n < 0: whole file
+// On open failure it returns a one-element slice containing "" plus the error.
+func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return []string{""}, err
+	}
+	defer f.Close()
+
+	var ret []string
+
+	r := bufio.NewReader(f)
+	for i := 0; i < n+int(offset) || n < 0; i++ {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			// Keep a final partial line that has no trailing newline.
+			if err == io.EOF && len(line) > 0 {
+				ret = append(ret, strings.Trim(line, "\n"))
+			}
+			break
+		}
+		if i < int(offset) {
+			continue
+		}
+		ret = append(ret, strings.Trim(line, "\n"))
+	}
+
+	return ret, nil
+}
+
+// IntToString converts a NUL-terminated []int8 (a C char array, as returned
+// by some syscalls) to a Go string, stopping at the first zero byte.
+func IntToString(orig []int8) string {
+	ret := make([]byte, len(orig))
+	size := -1
+	for i, o := range orig {
+		if o == 0 {
+			size = i
+			break
+		}
+		ret[i] = byte(o)
+	}
+	if size == -1 {
+		size = len(orig)
+	}
+
+	return string(ret[0:size])
+}
+
+// UintToString is IntToString for []uint8 sources.
+func UintToString(orig []uint8) string {
+	ret := make([]byte, len(orig))
+	size := -1
+	for i, o := range orig {
+		if o == 0 {
+			size = i
+			break
+		}
+		ret[i] = byte(o)
+	}
+	if size == -1 {
+		size = len(orig)
+	}
+
+	return string(ret[0:size])
+}
+
+// ByteToString trims leading NUL bytes, then returns everything up to the
+// next NUL. A slice with no non-NUL content is returned unchanged.
+func ByteToString(orig []byte) string {
+	n := -1
+	l := -1
+	for i, b := range orig {
+		// skip left side null
+		if l == -1 && b == 0 {
+			continue
+		}
+		if l == -1 {
+			l = i
+		}
+
+		if b == 0 {
+			break
+		}
+		n = i + 1
+	}
+	if n == -1 {
+		return string(orig)
+	}
+	return string(orig[l:n])
+}
+
+// ReadInts reads the first line of a single-line file, parses it as a
+// 32-bit decimal integer, and returns it as a one-element []int64.
+func ReadInts(filename string) ([]int64, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return []int64{}, err
+	}
+	defer f.Close()
+
+	var ret []int64
+
+	r := bufio.NewReader(f)
+
+	// The int files that this is concerned with should only be one liners.
+	line, err := r.ReadString('\n')
+	if err != nil {
+		return []int64{}, err
+	}
+
+	// bitSize 32: values outside the int32 range are a parse error.
+	i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32)
+	if err != nil {
+		return []int64{}, err
+	}
+	ret = append(ret, i)
+
+	return ret, nil
+}
+
+// HexToUint32 parses hex as a base-16 uint32; any parse error is swallowed
+// and yields 0.
+func HexToUint32(hex string) uint32 {
+	vv, _ := strconv.ParseUint(hex, 16, 32)
+	return uint32(vv)
+}
+
+// mustParseInt32 parses val as a base-10 int32; any parse error is
+// swallowed and yields 0.
+func mustParseInt32(val string) int32 {
+	vv, _ := strconv.ParseInt(val, 10, 32)
+	return int32(vv)
+}
+
+// mustParseUint64 parses val as a base-10 uint64; any parse error is
+// swallowed and yields 0.
+// Fix: use ParseUint rather than ParseInt so values in the upper half of
+// the uint64 range (> math.MaxInt64) parse instead of silently becoming 0.
+func mustParseUint64(val string) uint64 {
+	vv, _ := strconv.ParseUint(val, 10, 64)
+	return vv
+}
+
+// mustParseFloat64 parses val as a float64; any parse error is swallowed
+// and yields 0.
+func mustParseFloat64(val string) float64 {
+	vv, _ := strconv.ParseFloat(val, 64)
+	return vv
+}
+
+// StringsHas checks the target string slice contains src or not
+// (exact match after trimming surrounding whitespace from each element).
+func StringsHas(target []string, src string) bool {
+	for _, t := range target {
+		if strings.TrimSpace(t) == src {
+			return true
+		}
+	}
+	return false
+}
+
+// StringsContains checks the src in any string of the target string slice
+// (substring match).
+func StringsContains(target []string, src string) bool {
+	for _, t := range target {
+		if strings.Contains(t, src) {
+			return true
+		}
+	}
+	return false
+}
+
+// IntContains checks the src in any int of the target int slice.
+func IntContains(target []int, src int) bool {
+	for _, t := range target {
+		if src == t {
+			return true
+		}
+	}
+	return false
+}
+
+// attributes returns a map of the named (non-anonymous) field names of the
+// struct m (or the struct a pointer m refers to) to their reflect.Types,
+// or nil if m is not a struct.
+// This method is used only for debugging platform dependent code.
+func attributes(m interface{}) map[string]reflect.Type {
+	typ := reflect.TypeOf(m)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+	}
+
+	attrs := make(map[string]reflect.Type)
+	if typ.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < typ.NumField(); i++ {
+		p := typ.Field(i)
+		if !p.Anonymous {
+			attrs[p.Name] = p.Type
+		}
+	}
+
+	return attrs
+}
+
+// PathExists reports whether filename can be stat'ed. Any stat error
+// (including permission errors) counts as "does not exist".
+func PathExists(filename string) bool {
+	if _, err := os.Stat(filename); err == nil {
+		return true
+	}
+	return false
+}
+
+// PathExistsWithContents returns the filename exists and it is not empty
+func PathExistsWithContents(filename string) bool {
+	info, err := os.Stat(filename)
+	if err != nil {
+		return false
+	}
+	return info.Size() > 4 // "non-empty" means strictly larger than 4 bytes
+}
+
+// GetEnv retrieves the environment variable key. If it does not exist it returns the default.
+// Any combineWith components are joined onto the value with filepath.Join.
+func GetEnv(key string, dfault string, combineWith ...string) string {
+	value := os.Getenv(key)
+	if value == "" {
+		value = dfault
+	}
+
+	switch len(combineWith) {
+	case 0:
+		return value
+	case 1:
+		return filepath.Join(value, combineWith[0])
+	default:
+		all := make([]string, len(combineWith)+1)
+		all[0] = value
+		copy(all[1:], combineWith)
+		return filepath.Join(all...)
+	}
+}
+
+// The Host* helpers below resolve well-known system paths, overridable via
+// HOST_* environment variables (useful when running inside a container with
+// the host filesystem mounted elsewhere).
+
+func HostProc(combineWith ...string) string {
+	return GetEnv("HOST_PROC", "/proc", combineWith...)
+}
+
+func HostSys(combineWith ...string) string {
+	return GetEnv("HOST_SYS", "/sys", combineWith...)
+}
+
+func HostEtc(combineWith ...string) string {
+	return GetEnv("HOST_ETC", "/etc", combineWith...)
+}
+
+func HostVar(combineWith ...string) string {
+	return GetEnv("HOST_VAR", "/var", combineWith...)
+}
+
+func HostRun(combineWith ...string) string {
+	return GetEnv("HOST_RUN", "/run", combineWith...)
+}
+
+func HostDev(combineWith ...string) string {
+	return GetEnv("HOST_DEV", "/dev", combineWith...)
+}
+
+// MockEnv set environment variable and return revert function.
+// MockEnv should be used testing only.
+func MockEnv(key string, value string) func() {
+	original := os.Getenv(key)
+	os.Setenv(key, value)
+	return func() {
+		os.Setenv(key, original)
+	}
+}
+
+// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running
+// sysctl commands (see DoSysctrl).
+func getSysctrlEnv(env []string) []string {
+	foundLC := false
+	for i, line := range env {
+		if strings.HasPrefix(line, "LC_ALL") {
+			env[i] = "LC_ALL=C"
+			foundLC = true
+		}
+	}
+	if !foundLC {
+		env = append(env, "LC_ALL=C")
+	}
+	return env
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go
new file mode 100644
index 000000000..f1a784597
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go
@@ -0,0 +1,66 @@
+//go:build darwin
+// +build darwin
+
+package common
+
+import (
+ "context"
+ "os"
+ "os/exec"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// DoSysctrlWithContext runs `sysctl -n mib` with LC_ALL=C and returns the
+// output split into whitespace-separated fields, with any "{ " / " }"
+// braces stripped.
+func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) {
+	cmd := exec.CommandContext(ctx, "sysctl", "-n", mib)
+	cmd.Env = getSysctrlEnv(os.Environ())
+	out, err := cmd.Output()
+	if err != nil {
+		return []string{}, err
+	}
+	v := strings.Replace(string(out), "{ ", "", 1)
+	v = strings.Replace(string(v), " }", "", 1)
+	values := strings.Fields(string(v))
+
+	return values, nil
+}
+
+// CallSyscall issues the raw __sysctl syscall for mib twice: once to learn
+// the required buffer size, then again to fill a buffer of that size.
+// It returns the buffer, the length reported by the kernel, and any error.
+func CallSyscall(mib []int32) ([]byte, uint64, error) {
+	miblen := uint64(len(mib))
+
+	// get required buffer size
+	length := uint64(0)
+	_, _, err := unix.Syscall6(
+		202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(miblen),
+		0,
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		var b []byte
+		return b, length, err
+	}
+	if length == 0 {
+		var b []byte
+		return b, length, err
+	}
+	// get proc info itself
+	buf := make([]byte, length)
+	_, _, err = unix.Syscall6(
+		202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(miblen),
+		uintptr(unsafe.Pointer(&buf[0])),
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		return buf, length, err
+	}
+
+	return buf, length, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go
new file mode 100644
index 000000000..f590e2e67
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go
@@ -0,0 +1,82 @@
+//go:build freebsd || openbsd
+// +build freebsd openbsd
+
+package common
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// SysctlUint reads the sysctl value for mib and returns it as a uint64,
+// accepting either a 4-byte (32-bit) or 8-byte (64-bit) kernel value.
+func SysctlUint(mib string) (uint64, error) {
+	buf, err := unix.SysctlRaw(mib)
+	if err != nil {
+		return 0, err
+	}
+	if len(buf) == 8 { // 64 bit
+		return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+	}
+	if len(buf) == 4 { // 32bit
+		t := *(*uint32)(unsafe.Pointer(&buf[0]))
+		return uint64(t), nil
+	}
+	return 0, fmt.Errorf("unexpected size: %s, %d", mib, len(buf))
+}
+
+// DoSysctrl runs `sysctl -n mib` with LC_ALL=C and returns the output split
+// into whitespace-separated fields, with any "{ " / " }" braces stripped.
+func DoSysctrl(mib string) ([]string, error) {
+	cmd := exec.Command("sysctl", "-n", mib)
+	cmd.Env = getSysctrlEnv(os.Environ())
+	out, err := cmd.Output()
+	if err != nil {
+		return []string{}, err
+	}
+	v := strings.Replace(string(out), "{ ", "", 1)
+	v = strings.Replace(string(v), " }", "", 1)
+	values := strings.Fields(string(v))
+
+	return values, nil
+}
+
+// CallSyscall issues the raw __sysctl syscall for mib twice: once to learn
+// the required buffer size, then again to fill a buffer of that size.
+func CallSyscall(mib []int32) ([]byte, uint64, error) {
+	mibptr := unsafe.Pointer(&mib[0])
+	miblen := uint64(len(mib))
+
+	// get required buffer size
+	length := uint64(0)
+	_, _, err := unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(mibptr),
+		uintptr(miblen),
+		0,
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		var b []byte
+		return b, length, err
+	}
+	if length == 0 {
+		var b []byte
+		return b, length, err
+	}
+	// get proc info itself
+	buf := make([]byte, length)
+	_, _, err = unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(mibptr),
+		uintptr(miblen),
+		uintptr(unsafe.Pointer(&buf[0])),
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		return buf, length, err
+	}
+
+	return buf, length, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go
new file mode 100644
index 000000000..da44c3f2b
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go
@@ -0,0 +1,294 @@
+//go:build linux
+// +build linux
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// DoSysctrl runs `sysctl -n mib` with LC_ALL=C and returns the output split
+// into whitespace-separated fields, with any "{ " / " }" braces stripped.
+func DoSysctrl(mib string) ([]string, error) {
+	cmd := exec.Command("sysctl", "-n", mib)
+	cmd.Env = getSysctrlEnv(os.Environ())
+	out, err := cmd.Output()
+	if err != nil {
+		return []string{}, err
+	}
+	v := strings.Replace(string(out), "{ ", "", 1)
+	v = strings.Replace(string(v), " }", "", 1)
+	values := strings.Fields(string(v))
+
+	return values, nil
+}
+
+// NumProcs counts the running processes by counting the purely-numeric
+// directory names under /proc (or $HOST_PROC).
+func NumProcs() (uint64, error) {
+	f, err := os.Open(HostProc())
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	list, err := f.Readdirnames(-1)
+	if err != nil {
+		return 0, err
+	}
+	var cnt uint64
+
+	for _, v := range list {
+		if _, err = strconv.ParseUint(v, 10, 64); err == nil {
+			cnt++
+		}
+	}
+
+	return cnt, nil
+}
+
+// BootTimeWithContext returns the system boot time as a Unix timestamp in
+// seconds. On bare metal it reads the "btime" line of /proc/stat; inside an
+// lxc or docker guest it derives boot time from /proc/uptime instead.
+func BootTimeWithContext(ctx context.Context) (uint64, error) {
+	system, role, err := VirtualizationWithContext(ctx)
+	if err != nil {
+		return 0, err
+	}
+
+	statFile := "stat"
+	if system == "lxc" && role == "guest" {
+		// if lxc, /proc/uptime is used.
+		statFile = "uptime"
+	} else if system == "docker" && role == "guest" {
+		// also docker, guest
+		statFile = "uptime"
+	}
+
+	filename := HostProc(statFile)
+	lines, err := ReadLines(filename)
+	if err != nil {
+		return 0, err
+	}
+
+	if statFile == "stat" {
+		for _, line := range lines {
+			if strings.HasPrefix(line, "btime") {
+				f := strings.Fields(line)
+				if len(f) != 2 {
+					return 0, fmt.Errorf("wrong btime format")
+				}
+				b, err := strconv.ParseInt(f[1], 10, 64)
+				if err != nil {
+					return 0, err
+				}
+				t := uint64(b)
+				return t, nil
+			}
+		}
+	} else if statFile == "uptime" {
+		if len(lines) != 1 {
+			return 0, fmt.Errorf("wrong uptime format")
+		}
+		f := strings.Fields(lines[0])
+		b, err := strconv.ParseFloat(f[0], 64)
+		if err != nil {
+			return 0, err
+		}
+		// boot time = now - uptime
+		currentTime := float64(time.Now().UnixNano()) / float64(time.Second)
+		t := currentTime - b
+		return uint64(t), nil
+	}
+
+	return 0, fmt.Errorf("could not find btime")
+}
+
+// Virtualization is the context-free wrapper for VirtualizationWithContext.
+func Virtualization() (string, string, error) {
+	return VirtualizationWithContext(context.Background())
+}
+
+// required variables for concurrency safe virtualization caching
+var (
+	cachedVirtMap   map[string]string
+	cachedVirtMutex sync.RWMutex
+	cachedVirtOnce  sync.Once
+)
+
+// VirtualizationWithContext detects the virtualization technology ("system":
+// xen, kvm, vbox, vmware, openvz, lxc, docker, rkt, linux-vserver, ...) and
+// whether this machine is the "host" or a "guest", by probing a series of
+// /proc and /etc files. Later probes can overwrite earlier results, so the
+// probe order below is significant. The first computed result is cached for
+// the lifetime of the process.
+func VirtualizationWithContext(ctx context.Context) (string, string, error) {
+	var system, role string
+
+	// if cached already, return from cache
+	cachedVirtMutex.RLock() // unlock won't be deferred so concurrent reads don't wait for long
+	if cachedVirtMap != nil {
+		cachedSystem, cachedRole := cachedVirtMap["system"], cachedVirtMap["role"]
+		cachedVirtMutex.RUnlock()
+		return cachedSystem, cachedRole, nil
+	}
+	cachedVirtMutex.RUnlock()
+
+	filename := HostProc("xen")
+	if PathExists(filename) {
+		system = "xen"
+		role = "guest" // assume guest
+
+		if PathExists(filepath.Join(filename, "capabilities")) {
+			contents, err := ReadLines(filepath.Join(filename, "capabilities"))
+			if err == nil {
+				if StringsContains(contents, "control_d") {
+					role = "host"
+				}
+			}
+		}
+	}
+
+	// Loaded kernel modules reveal a hypervisor host or a vbox/vmware guest.
+	filename = HostProc("modules")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "kvm") {
+				system = "kvm"
+				role = "host"
+			} else if StringsContains(contents, "vboxdrv") {
+				system = "vbox"
+				role = "host"
+			} else if StringsContains(contents, "vboxguest") {
+				system = "vbox"
+				role = "guest"
+			} else if StringsContains(contents, "vmware") {
+				system = "vmware"
+				role = "guest"
+			}
+		}
+	}
+
+	// A virtual CPU model string marks a kvm/qemu guest.
+	filename = HostProc("cpuinfo")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "QEMU Virtual CPU") ||
+				StringsContains(contents, "Common KVM processor") ||
+				StringsContains(contents, "Common 32-bit KVM processor") {
+				system = "kvm"
+				role = "guest"
+			}
+		}
+	}
+
+	filename = HostProc("bus/pci/devices")
+	if PathExists(filename) {
+		contents, err := ReadLines(filename)
+		if err == nil {
+			if StringsContains(contents, "virtio-pci") {
+				role = "guest"
+			}
+		}
+	}
+
+	filename = HostProc()
+	if PathExists(filepath.Join(filename, "bc", "0")) {
+		system = "openvz"
+		role = "host"
+	} else if PathExists(filepath.Join(filename, "vz")) {
+		system = "openvz"
+		role = "guest"
+	}
+
+	// not use dmidecode because it requires root
+	if PathExists(filepath.Join(filename, "self", "status")) {
+		contents, err := ReadLines(filepath.Join(filename, "self", "status"))
+		if err == nil {
+			if StringsContains(contents, "s_context:") ||
+				StringsContains(contents, "VxID:") {
+				system = "linux-vserver"
+			}
+			// TODO: guest or host
+		}
+	}
+
+	if PathExists(filepath.Join(filename, "1", "environ")) {
+		contents, err := ReadFile(filepath.Join(filename, "1", "environ"))
+
+		if err == nil {
+			if strings.Contains(contents, "container=lxc") {
+				system = "lxc"
+				role = "guest"
+			}
+		}
+	}
+
+	if PathExists(filepath.Join(filename, "self", "cgroup")) {
+		contents, err := ReadLines(filepath.Join(filename, "self", "cgroup"))
+		if err == nil {
+			if StringsContains(contents, "lxc") {
+				system = "lxc"
+				role = "guest"
+			} else if StringsContains(contents, "docker") {
+				system = "docker"
+				role = "guest"
+			} else if StringsContains(contents, "machine-rkt") {
+				system = "rkt"
+				role = "guest"
+			} else if PathExists("/usr/bin/lxc-version") {
+				system = "lxc"
+				role = "host"
+			}
+		}
+	}
+
+	if PathExists(HostEtc("os-release")) {
+		p, _, err := GetOSRelease()
+		if err == nil && p == "coreos" {
+			system = "rkt" // Is it true?
+			role = "host"
+		}
+	}
+
+	// before returning for the first time, cache the system and role
+	cachedVirtOnce.Do(func() {
+		cachedVirtMutex.Lock()
+		defer cachedVirtMutex.Unlock()
+		cachedVirtMap = map[string]string{
+			"system": system,
+			"role":   role,
+		}
+	})
+
+	return system, role, nil
+}
+
+// GetOSRelease parses /etc/os-release (or $HOST_ETC/os-release) and returns
+// the ID and VERSION fields with surrounding quotes removed. A read failure
+// deliberately yields empty strings with a nil error.
+func GetOSRelease() (platform string, version string, err error) {
+	contents, err := ReadLines(HostEtc("os-release"))
+	if err != nil {
+		return "", "", nil // return empty
+	}
+	for _, line := range contents {
+		field := strings.Split(line, "=")
+		if len(field) < 2 {
+			continue
+		}
+		switch field[0] {
+		case "ID": // use ID for lowercase
+			platform = trimQuotes(field[1])
+		case "VERSION":
+			version = trimQuotes(field[1])
+		}
+	}
+
+	// cleanup amazon ID
+	if platform == "amzn" {
+		platform = "amazon"
+	}
+
+	return platform, version, nil
+}
+
+// Remove quotes of the source string
+// (only a matched pair of leading and trailing double quotes is stripped).
+func trimQuotes(s string) string {
+	if len(s) >= 2 {
+		if s[0] == '"' && s[len(s)-1] == '"' {
+			return s[1 : len(s)-1]
+		}
+	}
+	return s
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go
new file mode 100644
index 000000000..58d76f334
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go
@@ -0,0 +1,66 @@
+//go:build openbsd
+// +build openbsd
+
+package common
+
+import (
+ "os"
+ "os/exec"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// DoSysctrl runs `sysctl -n mib` with LC_ALL=C and returns the output split
+// into whitespace-separated fields, with any "{ " / " }" braces stripped.
+func DoSysctrl(mib string) ([]string, error) {
+	cmd := exec.Command("sysctl", "-n", mib)
+	cmd.Env = getSysctrlEnv(os.Environ())
+	out, err := cmd.Output()
+	if err != nil {
+		return []string{}, err
+	}
+	v := strings.Replace(string(out), "{ ", "", 1)
+	v = strings.Replace(string(v), " }", "", 1)
+	values := strings.Fields(string(v))
+
+	return values, nil
+}
+
+// CallSyscall issues the raw __sysctl syscall for mib twice: once to learn
+// the required buffer size, then again to fill a buffer of that size.
+func CallSyscall(mib []int32) ([]byte, uint64, error) {
+	mibptr := unsafe.Pointer(&mib[0])
+	miblen := uint64(len(mib))
+
+	// get required buffer size
+	length := uint64(0)
+	_, _, err := unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(mibptr),
+		uintptr(miblen),
+		0,
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		var b []byte
+		return b, length, err
+	}
+	if length == 0 {
+		var b []byte
+		return b, length, err
+	}
+	// get proc info itself
+	buf := make([]byte, length)
+	_, _, err = unix.Syscall6(
+		unix.SYS___SYSCTL,
+		uintptr(mibptr),
+		uintptr(miblen),
+		uintptr(unsafe.Pointer(&buf[0])),
+		uintptr(unsafe.Pointer(&length)),
+		0,
+		0)
+	if err != 0 {
+		return buf, length, err
+	}
+
+	return buf, length, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go
new file mode 100644
index 000000000..4af7e5c2a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go
@@ -0,0 +1,62 @@
+//go:build linux || freebsd || darwin || openbsd
+// +build linux freebsd darwin openbsd
+
+package common
+
+import (
+ "context"
+ "errors"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// CallLsofWithContext runs lsof (for all processes when pid == 0, otherwise
+// for the given pid) with extra args appended, and returns the output lines
+// with the header line and blank lines removed. lsof's "no pids found"
+// exit status 1 with empty output is treated as an empty result, not an
+// error; other run errors are otherwise ignored and parsing proceeds on
+// whatever output was produced.
+func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) {
+	var cmd []string
+	if pid == 0 { // will get from all processes.
+		cmd = []string{"-a", "-n", "-P"}
+	} else {
+		cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))}
+	}
+	cmd = append(cmd, args...)
+	out, err := invoke.CommandWithContext(ctx, "lsof", cmd...)
+	if err != nil {
+		if errors.Is(err, exec.ErrNotFound) {
+			return []string{}, err
+		}
+		// if no pid found, lsof returns code 1.
+		if err.Error() == "exit status 1" && len(out) == 0 {
+			return []string{}, nil
+		}
+	}
+	lines := strings.Split(string(out), "\n")
+
+	var ret []string
+	for _, l := range lines[1:] {
+		if len(l) == 0 {
+			continue
+		}
+		ret = append(ret, l)
+	}
+	return ret, nil
+}
+
+// CallPgrepWithContext runs `pgrep -P pid` and returns the child PIDs it
+// prints; unparsable lines are silently skipped.
+func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) {
+	out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid)))
+	if err != nil {
+		return []int32{}, err
+	}
+	lines := strings.Split(string(out), "\n")
+	ret := make([]int32, 0, len(lines))
+	for _, l := range lines {
+		if len(l) == 0 {
+			continue
+		}
+		i, err := strconv.ParseInt(l, 10, 32)
+		if err != nil {
+			continue
+		}
+		ret = append(ret, int32(i))
+	}
+	return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go
new file mode 100644
index 000000000..295b70bfa
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go
@@ -0,0 +1,301 @@
+//go:build windows
+// +build windows
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/yusufpapurcu/wmi"
+ "golang.org/x/sys/windows"
+)
+
+// for double values
+type PDH_FMT_COUNTERVALUE_DOUBLE struct {
+ CStatus uint32
+ DoubleValue float64
+}
+
+// for 64 bit integer values
+type PDH_FMT_COUNTERVALUE_LARGE struct {
+ CStatus uint32
+ LargeValue int64
+}
+
+// for long values
+type PDH_FMT_COUNTERVALUE_LONG struct {
+ CStatus uint32
+ LongValue int32
+ padding [4]byte
+}
+
+// windows system const
+const (
+ ERROR_SUCCESS = 0
+ ERROR_FILE_NOT_FOUND = 2
+ DRIVE_REMOVABLE = 2
+ DRIVE_FIXED = 3
+ HKEY_LOCAL_MACHINE = 0x80000002
+ RRF_RT_REG_SZ = 0x00000002
+ RRF_RT_REG_DWORD = 0x00000010
+ PDH_FMT_LONG = 0x00000100
+ PDH_FMT_DOUBLE = 0x00000200
+ PDH_FMT_LARGE = 0x00000400
+ PDH_INVALID_DATA = 0xc0000bc6
+ PDH_INVALID_HANDLE = 0xC0000bbc
+ PDH_NO_DATA = 0x800007d5
+
+ STATUS_BUFFER_OVERFLOW = 0x80000005
+ STATUS_BUFFER_TOO_SMALL = 0xC0000023
+ STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
+)
+
+const (
+ ProcessBasicInformation = 0
+ ProcessWow64Information = 26
+ ProcessQueryInformation = windows.PROCESS_DUP_HANDLE | windows.PROCESS_QUERY_INFORMATION
+
+ SystemExtendedHandleInformationClass = 64
+)
+
+var (
+ Modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ ModNt = windows.NewLazySystemDLL("ntdll.dll")
+ ModPdh = windows.NewLazySystemDLL("pdh.dll")
+ ModPsapi = windows.NewLazySystemDLL("psapi.dll")
+
+ ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes")
+ ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation")
+ ProcRtlGetNativeSystemInformation = ModNt.NewProc("RtlGetNativeSystemInformation")
+ ProcRtlNtStatusToDosError = ModNt.NewProc("RtlNtStatusToDosError")
+ ProcNtQueryInformationProcess = ModNt.NewProc("NtQueryInformationProcess")
+ ProcNtReadVirtualMemory = ModNt.NewProc("NtReadVirtualMemory")
+ ProcNtWow64QueryInformationProcess64 = ModNt.NewProc("NtWow64QueryInformationProcess64")
+ ProcNtWow64ReadVirtualMemory64 = ModNt.NewProc("NtWow64ReadVirtualMemory64")
+
+ PdhOpenQuery = ModPdh.NewProc("PdhOpenQuery")
+ PdhAddEnglishCounterW = ModPdh.NewProc("PdhAddEnglishCounterW")
+ PdhCollectQueryData = ModPdh.NewProc("PdhCollectQueryData")
+ PdhGetFormattedCounterValue = ModPdh.NewProc("PdhGetFormattedCounterValue")
+ PdhCloseQuery = ModPdh.NewProc("PdhCloseQuery")
+
+ procQueryDosDeviceW = Modkernel32.NewProc("QueryDosDeviceW")
+)
+
+type FILETIME struct {
+ DwLowDateTime uint32
+ DwHighDateTime uint32
+}
+
+// borrowed from net/interface_windows.go
+func BytePtrToString(p *uint8) string {
+ a := (*[10000]uint8)(unsafe.Pointer(p))
+ i := 0
+ for a[i] != 0 {
+ i++
+ }
+ return string(a[:i])
+}
+
+// CounterInfo struct is used to track a windows performance counter
+// copied from https://github.com/mackerelio/mackerel-agent/
+type CounterInfo struct {
+ PostName string
+ CounterName string
+ Counter windows.Handle
+}
+
+// CreateQuery with a PdhOpenQuery call
+// copied from https://github.com/mackerelio/mackerel-agent/
+func CreateQuery() (windows.Handle, error) {
+ var query windows.Handle
+ r, _, err := PdhOpenQuery.Call(0, 0, uintptr(unsafe.Pointer(&query)))
+ if r != 0 {
+ return 0, err
+ }
+ return query, nil
+}
+
+// CreateCounter with a PdhAddEnglishCounterW call
+func CreateCounter(query windows.Handle, pname, cname string) (*CounterInfo, error) {
+ var counter windows.Handle
+ r, _, err := PdhAddEnglishCounterW.Call(
+ uintptr(query),
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(cname))),
+ 0,
+ uintptr(unsafe.Pointer(&counter)))
+ if r != 0 {
+ return nil, err
+ }
+ return &CounterInfo{
+ PostName: pname,
+ CounterName: cname,
+ Counter: counter,
+ }, nil
+}
+
+// GetCounterValue get counter value from handle
+// adapted from https://github.com/mackerelio/mackerel-agent/
+func GetCounterValue(counter windows.Handle) (float64, error) {
+ var value PDH_FMT_COUNTERVALUE_DOUBLE
+ r, _, err := PdhGetFormattedCounterValue.Call(uintptr(counter), PDH_FMT_DOUBLE, uintptr(0), uintptr(unsafe.Pointer(&value)))
+ if r != 0 && r != PDH_INVALID_DATA {
+ return 0.0, err
+ }
+ return value.DoubleValue, nil
+}
+
+type Win32PerformanceCounter struct {
+ PostName string
+ CounterName string
+ Query windows.Handle
+ Counter windows.Handle
+}
+
+func NewWin32PerformanceCounter(postName, counterName string) (*Win32PerformanceCounter, error) {
+ query, err := CreateQuery()
+ if err != nil {
+ return nil, err
+ }
+ counter := Win32PerformanceCounter{
+ Query: query,
+ PostName: postName,
+ CounterName: counterName,
+ }
+ r, _, err := PdhAddEnglishCounterW.Call(
+ uintptr(counter.Query),
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(counter.CounterName))),
+ 0,
+ uintptr(unsafe.Pointer(&counter.Counter)),
+ )
+ if r != 0 {
+ return nil, err
+ }
+ return &counter, nil
+}
+
+func (w *Win32PerformanceCounter) GetValue() (float64, error) {
+ r, _, err := PdhCollectQueryData.Call(uintptr(w.Query))
+ if r != 0 && err != nil {
+ if r == PDH_NO_DATA {
+ return 0.0, fmt.Errorf("%w: this counter has not data", err)
+ }
+ return 0.0, err
+ }
+
+ return GetCounterValue(w.Counter)
+}
+
+func ProcessorQueueLengthCounter() (*Win32PerformanceCounter, error) {
+ return NewWin32PerformanceCounter("processor_queue_length", `\System\Processor Queue Length`)
+}
+
+// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging
+func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error {
+ if _, ok := ctx.Deadline(); !ok {
+ ctxTimeout, cancel := context.WithTimeout(ctx, Timeout)
+ defer cancel()
+ ctx = ctxTimeout
+ }
+
+ errChan := make(chan error, 1)
+ go func() {
+ errChan <- wmi.Query(query, dst, connectServerArgs...)
+ }()
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-errChan:
+ return err
+ }
+}
+
+// Convert paths using native DOS format like:
+// "\Device\HarddiskVolume1\Windows\systemew\file.txt"
+// into:
+// "C:\Windows\systemew\file.txt"
+func ConvertDOSPath(p string) string {
+ rawDrive := strings.Join(strings.Split(p, `\`)[:3], `\`)
+
+ for d := 'A'; d <= 'Z'; d++ {
+ szDeviceName := string(d) + ":"
+ szTarget := make([]uint16, 512)
+ ret, _, _ := procQueryDosDeviceW.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(szDeviceName))),
+ uintptr(unsafe.Pointer(&szTarget[0])),
+ uintptr(len(szTarget)))
+ if ret != 0 && windows.UTF16ToString(szTarget[:]) == rawDrive {
+ return filepath.Join(szDeviceName, p[len(rawDrive):])
+ }
+ }
+ return p
+}
+
+type NtStatus uint32
+
+func (s NtStatus) Error() error {
+ if s == 0 {
+ return nil
+ }
+ return fmt.Errorf("NtStatus 0x%08x", uint32(s))
+}
+
+func (s NtStatus) IsError() bool {
+ return s>>30 == 3
+}
+
+type SystemExtendedHandleTableEntryInformation struct {
+ Object uintptr
+ UniqueProcessId uintptr
+ HandleValue uintptr
+ GrantedAccess uint32
+ CreatorBackTraceIndex uint16
+ ObjectTypeIndex uint16
+ HandleAttributes uint32
+ Reserved uint32
+}
+
+type SystemExtendedHandleInformation struct {
+ NumberOfHandles uintptr
+ Reserved uintptr
+ Handles [1]SystemExtendedHandleTableEntryInformation
+}
+
+// CallWithExpandingBuffer https://github.com/hillu/go-ntdll
+func CallWithExpandingBuffer(fn func() NtStatus, buf *[]byte, resultLength *uint32) NtStatus {
+ for {
+ if st := fn(); st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH {
+ if int(*resultLength) <= cap(*buf) {
+ (*reflect.SliceHeader)(unsafe.Pointer(buf)).Len = int(*resultLength)
+ } else {
+ *buf = make([]byte, int(*resultLength))
+ }
+ continue
+ } else {
+ if !st.IsError() {
+ *buf = (*buf)[:int(*resultLength)]
+ }
+ return st
+ }
+ }
+}
+
+func NtQuerySystemInformation(
+ SystemInformationClass uint32,
+ SystemInformation *byte,
+ SystemInformationLength uint32,
+ ReturnLength *uint32,
+) NtStatus {
+ r0, _, _ := ProcNtQuerySystemInformation.Call(
+ uintptr(SystemInformationClass),
+ uintptr(unsafe.Pointer(SystemInformation)),
+ uintptr(SystemInformationLength),
+ uintptr(unsafe.Pointer(ReturnLength)))
+ return NtStatus(r0)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go
new file mode 100644
index 000000000..147cfdc4b
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go
@@ -0,0 +1,10 @@
+package common
+
+import "unsafe"
+
+// IsLittleEndian checks if the current platform uses little-endian.
+// copied from https://github.com/ntrrg/ntgo/blob/v0.8.0/runtime/infrastructure.go#L16 (MIT License)
+func IsLittleEndian() bool {
+ var x int16 = 0x0011
+ return *(*byte)(unsafe.Pointer(&x)) == 0x11
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
new file mode 100644
index 000000000..8c35b1722
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
@@ -0,0 +1,18 @@
+package common
+
+import (
+ "context"
+ "time"
+)
+
+// Sleep awaits for provided interval.
+// Can be interrupted by context cancelation.
+func Sleep(ctx context.Context, interval time.Duration) error {
+ timer := time.NewTimer(interval)
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-timer.C:
+ return nil
+ }
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem.go
new file mode 100644
index 000000000..ff960dacc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem.go
@@ -0,0 +1,118 @@
+package mem
+
+import (
+ "encoding/json"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var invoke common.Invoker = common.Invoke{}
+
+// Memory usage statistics. Total, Available and Used contain numbers of bytes
+// for human consumption.
+//
+// The other fields in this struct contain kernel specific values.
+type VirtualMemoryStat struct {
+ // Total amount of RAM on this system
+ Total uint64 `json:"total"`
+
+ // RAM available for programs to allocate
+ //
+ // This value is computed from the kernel specific values.
+ Available uint64 `json:"available"`
+
+ // RAM used by programs
+ //
+ // This value is computed from the kernel specific values.
+ Used uint64 `json:"used"`
+
+ // Percentage of RAM used by programs
+ //
+ // This value is computed from the kernel specific values.
+ UsedPercent float64 `json:"usedPercent"`
+
+ // This is the kernel's notion of free memory; RAM chips whose bits nobody
+ // cares about the value of right now. For a human consumable number,
+ // Available is what you really want.
+ Free uint64 `json:"free"`
+
+ // OS X / BSD specific numbers:
+ // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/
+ Active uint64 `json:"active"`
+ Inactive uint64 `json:"inactive"`
+ Wired uint64 `json:"wired"`
+
+ // FreeBSD specific numbers:
+ // https://reviews.freebsd.org/D8467
+ Laundry uint64 `json:"laundry"`
+
+ // Linux specific numbers
+ // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html
+ // https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+ // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting
+ Buffers uint64 `json:"buffers"`
+ Cached uint64 `json:"cached"`
+ WriteBack uint64 `json:"writeBack"`
+ Dirty uint64 `json:"dirty"`
+ WriteBackTmp uint64 `json:"writeBackTmp"`
+ Shared uint64 `json:"shared"`
+ Slab uint64 `json:"slab"`
+ Sreclaimable uint64 `json:"sreclaimable"`
+ Sunreclaim uint64 `json:"sunreclaim"`
+ PageTables uint64 `json:"pageTables"`
+ SwapCached uint64 `json:"swapCached"`
+ CommitLimit uint64 `json:"commitLimit"`
+ CommittedAS uint64 `json:"committedAS"`
+ HighTotal uint64 `json:"highTotal"`
+ HighFree uint64 `json:"highFree"`
+ LowTotal uint64 `json:"lowTotal"`
+ LowFree uint64 `json:"lowFree"`
+ SwapTotal uint64 `json:"swapTotal"`
+ SwapFree uint64 `json:"swapFree"`
+ Mapped uint64 `json:"mapped"`
+ VmallocTotal uint64 `json:"vmallocTotal"`
+ VmallocUsed uint64 `json:"vmallocUsed"`
+ VmallocChunk uint64 `json:"vmallocChunk"`
+ HugePagesTotal uint64 `json:"hugePagesTotal"`
+ HugePagesFree uint64 `json:"hugePagesFree"`
+ HugePagesRsvd uint64 `json:"hugePagesRsvd"`
+ HugePagesSurp uint64 `json:"hugePagesSurp"`
+ HugePageSize uint64 `json:"hugePageSize"`
+}
+
+type SwapMemoryStat struct {
+ Total uint64 `json:"total"`
+ Used uint64 `json:"used"`
+ Free uint64 `json:"free"`
+ UsedPercent float64 `json:"usedPercent"`
+ Sin uint64 `json:"sin"`
+ Sout uint64 `json:"sout"`
+ PgIn uint64 `json:"pgIn"`
+ PgOut uint64 `json:"pgOut"`
+ PgFault uint64 `json:"pgFault"`
+
+ // Linux specific numbers
+ // https://www.kernel.org/doc/Documentation/cgroup-v2.txt
+ PgMajFault uint64 `json:"pgMajFault"`
+}
+
+func (m VirtualMemoryStat) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
+
+func (m SwapMemoryStat) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
+
+type SwapDevice struct {
+ Name string `json:"name"`
+ UsedBytes uint64 `json:"usedBytes"`
+ FreeBytes uint64 `json:"freeBytes"`
+}
+
+func (m SwapDevice) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go
new file mode 100644
index 000000000..22a6a4e92
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go
@@ -0,0 +1,16 @@
+//go:build aix
+// +build aix
+
+package mem
+
+import (
+ "context"
+)
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func SwapMemory() (*SwapMemoryStat, error) {
+ return SwapMemoryWithContext(context.Background())
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go
new file mode 100644
index 000000000..67e11dff8
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go
@@ -0,0 +1,51 @@
+//go:build aix && cgo
+// +build aix,cgo
+
+package mem
+
+import (
+ "context"
+
+ "github.com/power-devops/perfstat"
+)
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ m, err := perfstat.MemoryTotalStat()
+ if err != nil {
+ return nil, err
+ }
+ pagesize := uint64(4096)
+ ret := VirtualMemoryStat{
+ Total: uint64(m.RealTotal) * pagesize,
+ Available: uint64(m.RealAvailable) * pagesize,
+ Free: uint64(m.RealFree) * pagesize,
+ Used: uint64(m.RealInUse) * pagesize,
+ UsedPercent: 100 * float64(m.RealInUse) / float64(m.RealTotal),
+ Active: uint64(m.VirtualActive) * pagesize,
+ SwapTotal: uint64(m.PgSpTotal) * pagesize,
+ SwapFree: uint64(m.PgSpFree) * pagesize,
+ }
+ return &ret, nil
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+ m, err := perfstat.MemoryTotalStat()
+ if err != nil {
+ return nil, err
+ }
+ pagesize := uint64(4096)
+ swapUsed := uint64(m.PgSpTotal-m.PgSpFree-m.PgSpRsvd) * pagesize
+ swapTotal := uint64(m.PgSpTotal) * pagesize
+ ret := SwapMemoryStat{
+ Total: swapTotal,
+ Free: uint64(m.PgSpFree) * pagesize,
+ Used: swapUsed,
+ UsedPercent: float64(100*swapUsed) / float64(swapTotal),
+ Sin: uint64(m.PgSpIn),
+ Sout: uint64(m.PgSpOut),
+ PgIn: uint64(m.PageIn),
+ PgOut: uint64(m.PageOut),
+ PgFault: uint64(m.PageFaults),
+ }
+ return &ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go
new file mode 100644
index 000000000..09ffd8ed1
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go
@@ -0,0 +1,81 @@
+//go:build aix && !cgo
+// +build aix,!cgo
+
+package mem
+
+import (
+ "context"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var whiteSpaces = regexp.MustCompile(`\s+`)
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ vmem, swap, err := callSVMon(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if vmem.Total == 0 {
+ return nil, common.ErrNotImplementedError
+ }
+ vmem.SwapTotal = swap.Total
+ vmem.SwapFree = swap.Free
+ return vmem, nil
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+ _, swap, err := callSVMon(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if swap.Total == 0 {
+ return nil, common.ErrNotImplementedError
+ }
+ return swap, nil
+}
+
+func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) {
+ out, err := invoke.CommandWithContext(ctx, "svmon", "-G")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pagesize := uint64(4096)
+ vmem := &VirtualMemoryStat{}
+ swap := &SwapMemoryStat{}
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.HasPrefix(line, "memory") {
+ p := whiteSpaces.Split(line, 7)
+ if len(p) > 2 {
+ if t, err := strconv.ParseUint(p[1], 10, 64); err == nil {
+ vmem.Total = t * pagesize
+ }
+ if t, err := strconv.ParseUint(p[2], 10, 64); err == nil {
+ vmem.Used = t * pagesize
+ if vmem.Total > 0 {
+ vmem.UsedPercent = 100 * float64(vmem.Used) / float64(vmem.Total)
+ }
+ }
+ if t, err := strconv.ParseUint(p[3], 10, 64); err == nil {
+ vmem.Free = t * pagesize
+ }
+ }
+ } else if strings.HasPrefix(line, "pg space") {
+ p := whiteSpaces.Split(line, 4)
+ if len(p) > 3 {
+ if t, err := strconv.ParseUint(p[2], 10, 64); err == nil {
+ swap.Total = t * pagesize
+ }
+ if t, err := strconv.ParseUint(p[3], 10, 64); err == nil {
+ swap.Free = swap.Total - t * pagesize
+ }
+ }
+ break
+ }
+ }
+ return vmem, swap, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go
new file mode 100644
index 000000000..ce930fbe4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go
@@ -0,0 +1,87 @@
+//go:build freebsd || openbsd
+// +build freebsd openbsd
+
+package mem
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const swapCommand = "swapctl"
+
+// swapctl column indexes
+const (
+ nameCol = 0
+ totalKiBCol = 1
+ usedKiBCol = 2
+)
+
+func SwapDevices() ([]*SwapDevice, error) {
+ return SwapDevicesWithContext(context.Background())
+}
+
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+ output, err := invoke.CommandWithContext(ctx, swapCommand, "-lk")
+ if err != nil {
+ return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err)
+ }
+
+ return parseSwapctlOutput(string(output))
+}
+
+func parseSwapctlOutput(output string) ([]*SwapDevice, error) {
+ lines := strings.Split(output, "\n")
+ if len(lines) == 0 {
+ return nil, fmt.Errorf("could not parse output of %q: no lines in %q", swapCommand, output)
+ }
+
+ // Check header headerFields are as expected.
+ header := lines[0]
+ header = strings.ToLower(header)
+ header = strings.ReplaceAll(header, ":", "")
+ headerFields := strings.Fields(header)
+ if len(headerFields) < usedKiBCol {
+ return nil, fmt.Errorf("couldn't parse %q: too few fields in header %q", swapCommand, header)
+ }
+ if headerFields[nameCol] != "device" {
+ return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[nameCol], "device")
+ }
+ if headerFields[totalKiBCol] != "1kb-blocks" && headerFields[totalKiBCol] != "1k-blocks" {
+ return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[totalKiBCol], "1kb-blocks")
+ }
+ if headerFields[usedKiBCol] != "used" {
+ return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[usedKiBCol], "used")
+ }
+
+ var swapDevices []*SwapDevice
+ for _, line := range lines[1:] {
+ if line == "" {
+ continue // the terminal line is typically empty
+ }
+ fields := strings.Fields(line)
+ if len(fields) < usedKiBCol {
+ return nil, fmt.Errorf("couldn't parse %q: too few fields", swapCommand)
+ }
+
+ totalKiB, err := strconv.ParseUint(fields[totalKiBCol], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapCommand, err)
+ }
+
+ usedKiB, err := strconv.ParseUint(fields[usedKiBCol], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapCommand, err)
+ }
+
+ swapDevices = append(swapDevices, &SwapDevice{
+ Name: fields[nameCol],
+ UsedBytes: usedKiB * 1024,
+ FreeBytes: (totalKiB - usedKiB) * 1024,
+ })
+ }
+
+ return swapDevices, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go
new file mode 100644
index 000000000..0527dd93c
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go
@@ -0,0 +1,71 @@
+//go:build darwin
+// +build darwin
+
+package mem
+
+import (
+ "context"
+ "fmt"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+func getHwMemsize() (uint64, error) {
+ total, err := unix.SysctlUint64("hw.memsize")
+ if err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// xsw_usage in sys/sysctl.h
+type swapUsage struct {
+ Total uint64
+ Avail uint64
+ Used uint64
+ Pagesize int32
+ Encrypted bool
+}
+
+// SwapMemory returns swapinfo.
+func SwapMemory() (*SwapMemoryStat, error) {
+ return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+ // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go
+ var ret *SwapMemoryStat
+
+ value, err := unix.SysctlRaw("vm.swapusage")
+ if err != nil {
+ return ret, err
+ }
+ if len(value) != 32 {
+ return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value))
+ }
+ swap := (*swapUsage)(unsafe.Pointer(&value[0]))
+
+ u := float64(0)
+ if swap.Total != 0 {
+ u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0
+ }
+
+ ret = &SwapMemoryStat{
+ Total: swap.Total,
+ Used: swap.Used,
+ Free: swap.Avail,
+ UsedPercent: u,
+ }
+
+ return ret, nil
+}
+
+func SwapDevices() ([]*SwapDevice, error) {
+ return SwapDevicesWithContext(context.Background())
+}
+
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+ return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go
new file mode 100644
index 000000000..e5da7dcdb
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go
@@ -0,0 +1,58 @@
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package mem
+
+/*
+#include
+#include
+*/
+import "C"
+
+import (
+ "context"
+ "fmt"
+ "unsafe"
+)
+
+// VirtualMemory returns VirtualmemoryStat.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT)
+ var vmstat C.vm_statistics_data_t
+
+ status := C.host_statistics(C.host_t(C.mach_host_self()),
+ C.HOST_VM_INFO,
+ C.host_info_t(unsafe.Pointer(&vmstat)),
+ &count)
+
+ if status != C.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_statistics error=%d", status)
+ }
+
+ pageSize := uint64(C.vm_kernel_page_size)
+ total, err := getHwMemsize()
+ if err != nil {
+ return nil, err
+ }
+ totalCount := C.natural_t(total / pageSize)
+
+ availableCount := vmstat.inactive_count + vmstat.free_count
+ usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount)
+
+ usedCount := totalCount - availableCount
+
+ return &VirtualMemoryStat{
+ Total: total,
+ Available: pageSize * uint64(availableCount),
+ Used: pageSize * uint64(usedCount),
+ UsedPercent: usedPercent,
+ Free: pageSize * uint64(vmstat.free_count),
+ Active: pageSize * uint64(vmstat.active_count),
+ Inactive: pageSize * uint64(vmstat.inactive_count),
+ Wired: pageSize * uint64(vmstat.wire_count),
+ }, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go
new file mode 100644
index 000000000..c93931680
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go
@@ -0,0 +1,89 @@
+//go:build darwin && !cgo
+// +build darwin,!cgo
+
+package mem
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// Runs vm_stat and returns Free and inactive pages
+func getVMStat(vms *VirtualMemoryStat) error {
+ out, err := invoke.Command("vm_stat")
+ if err != nil {
+ return err
+ }
+ return parseVMStat(string(out), vms)
+}
+
+func parseVMStat(out string, vms *VirtualMemoryStat) error {
+ var err error
+
+ lines := strings.Split(out, "\n")
+ pagesize := uint64(unix.Getpagesize())
+ for _, line := range lines {
+ fields := strings.Split(line, ":")
+ if len(fields) < 2 {
+ continue
+ }
+ key := strings.TrimSpace(fields[0])
+ value := strings.Trim(fields[1], " .")
+ switch key {
+ case "Pages free":
+ free, e := strconv.ParseUint(value, 10, 64)
+ if e != nil {
+ err = e
+ }
+ vms.Free = free * pagesize
+ case "Pages inactive":
+ inactive, e := strconv.ParseUint(value, 10, 64)
+ if e != nil {
+ err = e
+ }
+ vms.Inactive = inactive * pagesize
+ case "Pages active":
+ active, e := strconv.ParseUint(value, 10, 64)
+ if e != nil {
+ err = e
+ }
+ vms.Active = active * pagesize
+ case "Pages wired down":
+ wired, e := strconv.ParseUint(value, 10, 64)
+ if e != nil {
+ err = e
+ }
+ vms.Wired = wired * pagesize
+ }
+ }
+ return err
+}
+
+// VirtualMemory returns VirtualmemoryStat.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ ret := &VirtualMemoryStat{}
+
+ total, err := getHwMemsize()
+ if err != nil {
+ return nil, err
+ }
+ err = getVMStat(ret)
+ if err != nil {
+ return nil, err
+ }
+
+ ret.Available = ret.Free + ret.Inactive
+ ret.Total = total
+
+ ret.Used = ret.Total - ret.Available
+ ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total)
+
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go
new file mode 100644
index 000000000..0b6c528f2
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go
@@ -0,0 +1,34 @@
+//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix
+// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix
+
+package mem
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func SwapMemory() (*SwapMemoryStat, error) {
+ return SwapMemoryWithContext(context.Background())
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func SwapDevices() ([]*SwapDevice, error) {
+ return SwapDevicesWithContext(context.Background())
+}
+
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+ return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go
new file mode 100644
index 000000000..44543ef74
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go
@@ -0,0 +1,168 @@
+//go:build freebsd
+// +build freebsd
+
+package mem
+
+import (
+ "context"
+ "errors"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ pageSize, err := common.SysctlUint("vm.stats.vm.v_page_size")
+ if err != nil {
+ return nil, err
+ }
+ physmem, err := common.SysctlUint("hw.physmem")
+ if err != nil {
+ return nil, err
+ }
+
+ free, err := common.SysctlUint("vm.stats.vm.v_free_count")
+ if err != nil {
+ return nil, err
+ }
+ active, err := common.SysctlUint("vm.stats.vm.v_active_count")
+ if err != nil {
+ return nil, err
+ }
+ inactive, err := common.SysctlUint("vm.stats.vm.v_inactive_count")
+ if err != nil {
+ return nil, err
+ }
+ buffers, err := common.SysctlUint("vfs.bufspace")
+ if err != nil {
+ return nil, err
+ }
+ wired, err := common.SysctlUint("vm.stats.vm.v_wire_count")
+ if err != nil {
+ return nil, err
+ }
+ var cached, laundry uint64
+ osreldate, _ := common.SysctlUint("kern.osreldate")
+ if osreldate < 1102000 {
+ cached, err = common.SysctlUint("vm.stats.vm.v_cache_count")
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ laundry, err = common.SysctlUint("vm.stats.vm.v_laundry_count")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ p := pageSize
+ ret := &VirtualMemoryStat{
+ Total: physmem,
+ Free: free * p,
+ Active: active * p,
+ Inactive: inactive * p,
+ Cached: cached * p,
+ Buffers: buffers,
+ Wired: wired * p,
+ Laundry: laundry * p,
+ }
+
+ ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Laundry
+ ret.Used = ret.Total - ret.Available
+ ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0
+
+ return ret, nil
+}
+
+// Return swapinfo
+func SwapMemory() (*SwapMemoryStat, error) {
+ return SwapMemoryWithContext(context.Background())
+}
+
+// Constants from vm/vm_param.h
+// nolint: golint
+const (
+ XSWDEV_VERSION11 = 1
+ XSWDEV_VERSION = 2
+)
+
+// Types from vm/vm_param.h
+type xswdev struct {
+ Version uint32 // Version is the version
+ Dev uint64 // Dev is the device identifier
+ Flags int32 // Flags is the swap flags applied to the device
+ NBlks int32 // NBlks is the total number of blocks
+ Used int32 // Used is the number of blocks used
+}
+
+// xswdev11 is a compatibility for under FreeBSD 11
+// sys/vm/swap_pager.c
+type xswdev11 struct {
+ Version uint32 // Version is the version
+ Dev uint32 // Dev is the device identifier
+ Flags int32 // Flags is the swap flags applied to the device
+ NBlks int32 // NBlks is the total number of blocks
+ Used int32 // Used is the number of blocks used
+}
+
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+ // FreeBSD can have multiple swap devices so we total them up
+ i, err := common.SysctlUint("vm.nswapdev")
+ if err != nil {
+ return nil, err
+ }
+
+ if i == 0 {
+ return nil, errors.New("no swap devices found")
+ }
+
+ c := int(i)
+
+ i, err = common.SysctlUint("vm.stats.vm.v_page_size")
+ if err != nil {
+ return nil, err
+ }
+ pageSize := i
+
+ var buf []byte
+ s := &SwapMemoryStat{}
+ for n := 0; n < c; n++ {
+ buf, err = unix.SysctlRaw("vm.swap_info", n)
+ if err != nil {
+ return nil, err
+ }
+
+ // first, try to parse with version 2
+ xsw := (*xswdev)(unsafe.Pointer(&buf[0]))
+ if xsw.Version == XSWDEV_VERSION11 {
+ // this is version 1, so try to parse again
+ xsw := (*xswdev11)(unsafe.Pointer(&buf[0]))
+ if xsw.Version != XSWDEV_VERSION11 {
+ return nil, errors.New("xswdev version mismatch(11)")
+ }
+ s.Total += uint64(xsw.NBlks)
+ s.Used += uint64(xsw.Used)
+ } else if xsw.Version != XSWDEV_VERSION {
+ return nil, errors.New("xswdev version mismatch")
+ } else {
+ s.Total += uint64(xsw.NBlks)
+ s.Used += uint64(xsw.Used)
+ }
+
+ }
+
+ if s.Total != 0 {
+ s.UsedPercent = float64(s.Used) / float64(s.Total) * 100
+ }
+ s.Total *= pageSize
+ s.Used *= pageSize
+ s.Free = s.Total - s.Used
+
+ return s, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
new file mode 100644
index 000000000..9a5d693b1
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
@@ -0,0 +1,525 @@
+//go:build linux
+// +build linux
+
+package mem
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+// VirtualMemoryExStat holds the Linux-specific active/inactive LRU-list
+// counters, in bytes, filled from the corresponding /proc/meminfo rows by
+// fillFromMeminfoWithContext.
+type VirtualMemoryExStat struct {
+	ActiveFile   uint64 `json:"activefile"`   // "Active(file)"
+	InactiveFile uint64 `json:"inactivefile"` // "Inactive(file)"
+	ActiveAnon   uint64 `json:"activeanon"`   // "Active(anon)"
+	InactiveAnon uint64 `json:"inactiveanon"` // "Inactive(anon)"
+	Unevictable  uint64 `json:"unevictable"`  // "Unevictable"
+}
+
+// String renders the stat as a single-line JSON object.
+func (v VirtualMemoryExStat) String() string {
+	s, _ := json.Marshal(v)
+	return string(s)
+}
+
+// VirtualMemory returns system-wide memory usage parsed from /proc/meminfo.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+// VirtualMemoryWithContext is like VirtualMemory; ctx is accepted for API
+// symmetry but is not consulted (meminfo is read synchronously).
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	vm, _, err := fillFromMeminfoWithContext()
+	if err != nil {
+		return nil, err
+	}
+	return vm, nil
+}
+
+// VirtualMemoryEx returns the extended Linux-only LRU counters.
+func VirtualMemoryEx() (*VirtualMemoryExStat, error) {
+	return VirtualMemoryExWithContext(context.Background())
+}
+
+// VirtualMemoryExWithContext is like VirtualMemoryEx; ctx is accepted for
+// API symmetry but is not consulted.
+func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) {
+	_, vmEx, err := fillFromMeminfoWithContext()
+	if err != nil {
+		return nil, err
+	}
+	return vmEx, nil
+}
+
+func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, error) {
+ filename := common.HostProc("meminfo")
+ lines, _ := common.ReadLines(filename)
+
+ // flag if MemAvailable is in /proc/meminfo (kernel 3.14+)
+ memavail := false
+ activeFile := false // "Active(file)" not available: 2.6.28 / Dec 2008
+ inactiveFile := false // "Inactive(file)" not available: 2.6.28 / Dec 2008
+ sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006
+
+ ret := &VirtualMemoryStat{}
+ retEx := &VirtualMemoryExStat{}
+
+ for _, line := range lines {
+ fields := strings.Split(line, ":")
+ if len(fields) != 2 {
+ continue
+ }
+ key := strings.TrimSpace(fields[0])
+ value := strings.TrimSpace(fields[1])
+ value = strings.Replace(value, " kB", "", -1)
+
+ switch key {
+ case "MemTotal":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Total = t * 1024
+ case "MemFree":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Free = t * 1024
+ case "MemAvailable":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ memavail = true
+ ret.Available = t * 1024
+ case "Buffers":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Buffers = t * 1024
+ case "Cached":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Cached = t * 1024
+ case "Active":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Active = t * 1024
+ case "Inactive":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Inactive = t * 1024
+ case "Active(anon)":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ retEx.ActiveAnon = t * 1024
+ case "Inactive(anon)":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ retEx.InactiveAnon = t * 1024
+ case "Active(file)":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ activeFile = true
+ retEx.ActiveFile = t * 1024
+ case "Inactive(file)":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ inactiveFile = true
+ retEx.InactiveFile = t * 1024
+ case "Unevictable":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ retEx.Unevictable = t * 1024
+ case "WriteBack":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.WriteBack = t * 1024
+ case "WriteBackTmp":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.WriteBackTmp = t * 1024
+ case "Dirty":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Dirty = t * 1024
+ case "Shmem":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Shared = t * 1024
+ case "Slab":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Slab = t * 1024
+ case "SReclaimable":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ sReclaimable = true
+ ret.Sreclaimable = t * 1024
+ case "SUnreclaim":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Sunreclaim = t * 1024
+ case "PageTables":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.PageTables = t * 1024
+ case "SwapCached":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.SwapCached = t * 1024
+ case "CommitLimit":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.CommitLimit = t * 1024
+ case "Committed_AS":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.CommittedAS = t * 1024
+ case "HighTotal":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HighTotal = t * 1024
+ case "HighFree":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HighFree = t * 1024
+ case "LowTotal":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.LowTotal = t * 1024
+ case "LowFree":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.LowFree = t * 1024
+ case "SwapTotal":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.SwapTotal = t * 1024
+ case "SwapFree":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.SwapFree = t * 1024
+ case "Mapped":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.Mapped = t * 1024
+ case "VmallocTotal":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.VmallocTotal = t * 1024
+ case "VmallocUsed":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.VmallocUsed = t * 1024
+ case "VmallocChunk":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.VmallocChunk = t * 1024
+ case "HugePages_Total":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HugePagesTotal = t
+ case "HugePages_Free":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HugePagesFree = t
+ case "HugePages_Rsvd":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HugePagesRsvd = t
+ case "HugePages_Surp":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HugePagesSurp = t
+ case "Hugepagesize":
+ t, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return ret, retEx, err
+ }
+ ret.HugePageSize = t * 1024
+ }
+ }
+
+ ret.Cached += ret.Sreclaimable
+
+ if !memavail {
+ if activeFile && inactiveFile && sReclaimable {
+ ret.Available = calculateAvailVmem(ret, retEx)
+ } else {
+ ret.Available = ret.Cached + ret.Free
+ }
+ }
+
+ ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached
+ ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0
+
+ return ret, retEx, nil
+}
+
+// SwapMemory returns swap usage; see SwapMemoryWithContext.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// SwapMemoryWithContext reports swap totals from sysinfo(2) and paging
+// counters from /proc/vmstat; ctx is accepted for API symmetry but is not
+// consulted.
+//
+// Fix: /proc/vmstat keys are all lowercase ("pgpgin", "pgpgout", "pgfault",
+// "pgmajfault"); the previous mixed-case cases never matched, so the Pg*
+// fields stayed zero.
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	sysinfo := &unix.Sysinfo_t{}
+
+	if err := unix.Sysinfo(sysinfo); err != nil {
+		return nil, err
+	}
+	// Totalswap/Freeswap are in units of sysinfo.Unit bytes.
+	ret := &SwapMemoryStat{
+		Total: uint64(sysinfo.Totalswap) * uint64(sysinfo.Unit),
+		Free:  uint64(sysinfo.Freeswap) * uint64(sysinfo.Unit),
+	}
+	ret.Used = ret.Total - ret.Free
+	// check Infinity: avoid 0/0 when no swap is configured
+	if ret.Total != 0 {
+		ret.UsedPercent = float64(ret.Total-ret.Free) / float64(ret.Total) * 100.0
+	} else {
+		ret.UsedPercent = 0
+	}
+	filename := common.HostProc("vmstat")
+	lines, _ := common.ReadLines(filename)
+	for _, l := range lines {
+		fields := strings.Fields(l)
+		if len(fields) < 2 {
+			continue
+		}
+		// NOTE(review): the * 4 * 1024 conversion below assumes 4 KiB pages,
+		// as the original code did — confirm for non-4K-page architectures.
+		switch fields[0] {
+		case "pswpin":
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.Sin = value * 4 * 1024
+		case "pswpout":
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.Sout = value * 4 * 1024
+		case "pgpgin": // was "pgpgIn": never matched
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.PgIn = value * 4 * 1024
+		case "pgpgout": // was "pgpgOut": never matched
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.PgOut = value * 4 * 1024
+		case "pgfault": // was "pgFault": never matched
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.PgFault = value * 4 * 1024
+		case "pgmajfault": // was "pgMajFault": never matched
+			value, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			ret.PgMajFault = value * 4 * 1024
+		}
+	}
+	return ret, nil
+}
+
+// calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide
+// "MemAvailable:" column. It reimplements an algorithm from the link below
+// https://github.com/giampaolo/psutil/pull/890
+//
+// Fixes: (1) the arithmetic is done in int64 — the old uint64 version wrapped
+// on underflow, making the final "< 0" clamp dead code; (2) blank or short
+// zoneinfo lines no longer cause an index-out-of-range panic.
+func calculateAvailVmem(ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 {
+	var watermarkLow uint64
+
+	fn := common.HostProc("zoneinfo")
+	lines, err := common.ReadLines(fn)
+	if err != nil {
+		return ret.Free + ret.Cached // fallback under kernel 2.6.13
+	}
+
+	pagesize := uint64(os.Getpagesize())
+	watermarkLow = 0
+
+	for _, line := range lines {
+		fields := strings.Fields(line)
+
+		// Guard: a blank line yields no fields, and a bare "low" has no value.
+		if len(fields) < 2 {
+			continue
+		}
+		if strings.HasPrefix(fields[0], "low") {
+			lowValue, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				lowValue = 0 // best effort: treat unparsable watermark as zero
+			}
+			watermarkLow += lowValue
+		}
+	}
+
+	watermarkLow *= pagesize
+
+	// Signed arithmetic: the psutil estimate can legitimately go negative,
+	// in which case it is clamped to zero below.
+	availMemory := int64(ret.Free) - int64(watermarkLow)
+	pageCache := int64(retEx.ActiveFile + retEx.InactiveFile)
+	pageCache -= int64(math.Min(float64(pageCache/2), float64(watermarkLow)))
+	availMemory += pageCache
+	availMemory += int64(ret.Sreclaimable) - int64(math.Min(float64(ret.Sreclaimable/2), float64(watermarkLow)))
+
+	if availMemory < 0 {
+		availMemory = 0
+	}
+
+	return uint64(availMemory)
+}
+
+// swapsFilename is the /proc entry listing active swap areas.
+const swapsFilename = "swaps"
+
+// swaps file column indexes
+const (
+	nameCol = 0
+	// typeCol = 1
+	totalCol = 2
+	usedCol  = 3
+	// priorityCol = 4
+)
+
+// SwapDevices lists the active swap areas from /proc/swaps.
+func SwapDevices() ([]*SwapDevice, error) {
+	return SwapDevicesWithContext(context.Background())
+}
+
+// SwapDevicesWithContext opens /proc/swaps and delegates the parsing to
+// parseSwapsFile; ctx is accepted for API symmetry but is not consulted.
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+	swapsFilePath := common.HostProc(swapsFilename)
+	f, err := os.Open(swapsFilePath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseSwapsFile(f)
+}
+
+// parseSwapsFile parses the /proc/swaps table from r: a header line
+// ("Filename ... Size Used ...") followed by one row per swap area. Size
+// and Used are in KiB and are converted to bytes.
+//
+// Fix: the bounds checks were off by one — fields[usedCol] is accessed, so
+// a row needs at least usedCol+1 fields; "len(fields) < usedCol" allowed an
+// out-of-range access on a row with exactly usedCol fields.
+func parseSwapsFile(r io.Reader) ([]*SwapDevice, error) {
+	swapsFilePath := common.HostProc(swapsFilename)
+	scanner := bufio.NewScanner(r)
+	if !scanner.Scan() {
+		if err := scanner.Err(); err != nil {
+			return nil, fmt.Errorf("couldn't read file %q: %w", swapsFilePath, err)
+		}
+		return nil, fmt.Errorf("unexpected end-of-file in %q", swapsFilePath)
+
+	}
+
+	// Check header headerFields are as expected
+	headerFields := strings.Fields(scanner.Text())
+	if len(headerFields) <= usedCol {
+		return nil, fmt.Errorf("couldn't parse %q: too few fields in header", swapsFilePath)
+	}
+	if headerFields[nameCol] != "Filename" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[nameCol], "Filename")
+	}
+	if headerFields[totalCol] != "Size" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[totalCol], "Size")
+	}
+	if headerFields[usedCol] != "Used" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[usedCol], "Used")
+	}
+
+	var swapDevices []*SwapDevice
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) <= usedCol { // need indexes 0..usedCol
+			return nil, fmt.Errorf("couldn't parse %q: too few fields", swapsFilePath)
+		}
+
+		totalKiB, err := strconv.ParseUint(fields[totalCol], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapsFilePath, err)
+		}
+
+		usedKiB, err := strconv.ParseUint(fields[usedCol], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapsFilePath, err)
+		}
+
+		swapDevices = append(swapDevices, &SwapDevice{
+			Name:      fields[nameCol],
+			UsedBytes: usedKiB * 1024,
+			FreeBytes: (totalKiB - usedKiB) * 1024,
+		})
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("couldn't read file %q: %w", swapsFilePath, err)
+	}
+
+	return swapDevices, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go
new file mode 100644
index 000000000..97644923a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go
@@ -0,0 +1,99 @@
+//go:build openbsd
+// +build openbsd
+
+package mem
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+// GetPageSize returns the VM page size reported by the vm.uvmexp sysctl.
+func GetPageSize() (uint64, error) {
+	return GetPageSizeWithContext(context.Background())
+}
+
+// GetPageSizeWithContext is like GetPageSize; ctx is not consulted.
+func GetPageSizeWithContext(ctx context.Context) (uint64, error) {
+	uvmexp, err := unix.SysctlUvmexp("vm.uvmexp")
+	if err != nil {
+		return 0, err
+	}
+	return uint64(uvmexp.Pagesize), nil
+}
+
+// VirtualMemory returns memory usage derived from the vm.uvmexp sysctl.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+// VirtualMemoryWithContext builds the stat from uvmexp page counters
+// (converted to bytes via the reported page size) and reads the buffer
+// cache size from the vfs.generic.bcachestat sysctl; ctx is not consulted.
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	uvmexp, err := unix.SysctlUvmexp("vm.uvmexp")
+	if err != nil {
+		return nil, err
+	}
+	p := uint64(uvmexp.Pagesize)
+
+	ret := &VirtualMemoryStat{
+		Total:    uint64(uvmexp.Npages) * p,
+		Free:     uint64(uvmexp.Free) * p,
+		Active:   uint64(uvmexp.Active) * p,
+		Inactive: uint64(uvmexp.Inactive) * p,
+		Cached:   0, // not available
+		Wired:    uint64(uvmexp.Wired) * p,
+	}
+
+	ret.Available = ret.Inactive + ret.Cached + ret.Free
+	ret.Used = ret.Total - ret.Available
+	ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0
+
+	// Buffer-cache statistics via sysctl(CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT).
+	mib := []int32{CTLVfs, VfsGeneric, VfsBcacheStat}
+	buf, length, err := common.CallSyscall(mib)
+	if err != nil {
+		return nil, err
+	}
+	if length < sizeOfBcachestats {
+		return nil, fmt.Errorf("short syscall ret %d bytes", length)
+	}
+	var bcs Bcachestats
+	br := bytes.NewReader(buf)
+	// NOTE(review): decoding assumes a little-endian host — confirm this is
+	// intended for all supported OpenBSD architectures.
+	err = common.Read(br, binary.LittleEndian, &bcs)
+	if err != nil {
+		return nil, err
+	}
+	ret.Buffers = uint64(bcs.Numbufpages) * p
+
+	return ret, nil
+}
+
+// SwapMemory returns swapctl's summary info; see SwapMemoryWithContext.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// SwapMemoryWithContext shells out to "swapctl -sk" and parses its single
+// summary line (values in 1K blocks, converted to bytes).
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	out, err := invoke.CommandWithContext(ctx, "swapctl", "-sk")
+	if err != nil {
+		// Deliberate best-effort: if swapctl cannot run, report an empty
+		// stat rather than an error.
+		return &SwapMemoryStat{}, nil
+	}
+
+	line := string(out)
+	var total, used, free uint64
+
+	_, err = fmt.Sscanf(line,
+		"total: %d 1K-blocks allocated, %d used, %d available",
+		&total, &used, &free)
+	if err != nil {
+		return nil, errors.New("failed to parse swapctl output")
+	}
+
+	// NOTE(review): if total is 0 this yields NaN rather than 0 — confirm
+	// whether that can occur with "swapctl -sk" output.
+	percent := float64(used) / float64(total) * 100
+	return &SwapMemoryStat{
+		Total:       total * 1024,
+		Used:        used * 1024,
+		Free:        free * 1024,
+		UsedPercent: percent,
+	}, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go
new file mode 100644
index 000000000..de2b26ca4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go
@@ -0,0 +1,38 @@
+//go:build openbsd && 386
+// +build openbsd,386
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs mem/types_openbsd.go
+
+package mem
+
+// Sysctl MIB components for vfs.generic.bcachestat, used by
+// VirtualMemoryWithContext to read buffer-cache statistics.
+const (
+	CTLVfs        = 10
+	VfsGeneric    = 0
+	VfsBcacheStat = 3
+)
+
+// sizeOfBcachestats is the expected byte length of the sysctl reply on
+// 386; shorter replies are rejected by the caller.
+const (
+	sizeOfBcachestats = 0x90
+)
+
+// Bcachestats mirrors OpenBSD's struct bcachestats for this architecture
+// (generated by cgo -godefs).
+type Bcachestats struct {
+	Numbufs       int64
+	Numbufpages   int64
+	Numdirtypages int64
+	Numcleanpages int64
+	Pendingwrites int64
+	Pendingreads  int64
+	Numwrites     int64
+	Numreads      int64
+	Cachehits     int64
+	Busymapped    int64
+	Dmapages      int64
+	Highpages     int64
+	Delwribufs    int64
+	Kvaslots      int64
+	Avail         int64
+	Highflips     int64
+	Highflops     int64
+	Dmaflips      int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go
new file mode 100644
index 000000000..d187abf01
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go
@@ -0,0 +1,32 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_openbsd.go
+
+package mem
+
+// Sysctl MIB components for vfs.generic.bcachestat, used by
+// VirtualMemoryWithContext to read buffer-cache statistics.
+// NOTE(review): unlike the other arch files this one carries no build tags;
+// the _openbsd_amd64.go filename suffix supplies the constraint.
+const (
+	CTLVfs        = 10
+	VfsGeneric    = 0
+	VfsBcacheStat = 3
+)
+
+// sizeOfBcachestats is the expected byte length of the sysctl reply on
+// amd64; shorter replies are rejected by the caller.
+const (
+	sizeOfBcachestats = 0x78
+)
+
+// Bcachestats mirrors OpenBSD's struct bcachestats for this architecture
+// (generated by cgo -godefs).
+type Bcachestats struct {
+	Numbufs       int64
+	Numbufpages   int64
+	Numdirtypages int64
+	Numcleanpages int64
+	Pendingwrites int64
+	Pendingreads  int64
+	Numwrites     int64
+	Numreads      int64
+	Cachehits     int64
+	Busymapped    int64
+	Dmapages      int64
+	Highpages     int64
+	Delwribufs    int64
+	Kvaslots      int64
+	Avail         int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go
new file mode 100644
index 000000000..2488f1851
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go
@@ -0,0 +1,38 @@
+//go:build openbsd && arm
+// +build openbsd,arm
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs mem/types_openbsd.go
+
+package mem
+
+// Sysctl MIB components for vfs.generic.bcachestat, used by
+// VirtualMemoryWithContext to read buffer-cache statistics.
+const (
+	CTLVfs        = 10
+	VfsGeneric    = 0
+	VfsBcacheStat = 3
+)
+
+// sizeOfBcachestats is the expected byte length of the sysctl reply on
+// arm; shorter replies are rejected by the caller.
+const (
+	sizeOfBcachestats = 0x90
+)
+
+// Bcachestats mirrors OpenBSD's struct bcachestats for this architecture
+// (generated by cgo -godefs).
+type Bcachestats struct {
+	Numbufs       int64
+	Numbufpages   int64
+	Numdirtypages int64
+	Numcleanpages int64
+	Pendingwrites int64
+	Pendingreads  int64
+	Numwrites     int64
+	Numreads      int64
+	Cachehits     int64
+	Busymapped    int64
+	Dmapages      int64
+	Highpages     int64
+	Delwribufs    int64
+	Kvaslots      int64
+	Avail         int64
+	Highflips     int64
+	Highflops     int64
+	Dmaflips      int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go
new file mode 100644
index 000000000..3661b16fb
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go
@@ -0,0 +1,38 @@
+//go:build openbsd && arm64
+// +build openbsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs mem/types_openbsd.go
+
+package mem
+
+// Sysctl MIB components for vfs.generic.bcachestat, used by
+// VirtualMemoryWithContext to read buffer-cache statistics.
+const (
+	CTLVfs        = 10
+	VfsGeneric    = 0
+	VfsBcacheStat = 3
+)
+
+// sizeOfBcachestats is the expected byte length of the sysctl reply on
+// arm64; shorter replies are rejected by the caller.
+const (
+	sizeOfBcachestats = 0x90
+)
+
+// Bcachestats mirrors OpenBSD's struct bcachestats for this architecture
+// (generated by cgo -godefs).
+type Bcachestats struct {
+	Numbufs       int64
+	Numbufpages   int64
+	Numdirtypages int64
+	Numcleanpages int64
+	Pendingwrites int64
+	Pendingreads  int64
+	Numwrites     int64
+	Numreads      int64
+	Cachehits     int64
+	Busymapped    int64
+	Dmapages      int64
+	Highpages     int64
+	Delwribufs    int64
+	Kvaslots      int64
+	Avail         int64
+	Highflips     int64
+	Highflops     int64
+	Dmaflips      int64
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go
new file mode 100644
index 000000000..b5259f844
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go
@@ -0,0 +1,68 @@
+//go:build plan9
+// +build plan9
+
+package mem
+
+import (
+ "context"
+ "os"
+
+ stats "github.com/lufia/plan9stats"
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// SwapMemory returns swap usage; see SwapMemoryWithContext.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// SwapMemoryWithContext derives swap usage from the plan9stats memory
+// statistics; HOST_ROOT (if set) relocates the stats root directory.
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	root := os.Getenv("HOST_ROOT")
+	m, err := stats.ReadMemStats(ctx, stats.WithRootDir(root))
+	if err != nil {
+		return nil, err
+	}
+	// Avoid 0/0 when no swap pages exist.
+	u := 0.0
+	if m.SwapPages.Avail != 0 {
+		u = float64(m.SwapPages.Used) / float64(m.SwapPages.Avail) * 100.0
+	}
+	return &SwapMemoryStat{
+		Total:       uint64(m.SwapPages.Avail * m.PageSize),
+		Used:        uint64(m.SwapPages.Used * m.PageSize),
+		Free:        uint64(m.SwapPages.Free() * m.PageSize),
+		UsedPercent: u,
+	}, nil
+}
+
+// VirtualMemory returns memory usage; see VirtualMemoryWithContext.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+// VirtualMemoryWithContext derives memory usage from plan9stats user-page
+// counters; HOST_ROOT (if set) relocates the stats root directory.
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	root := os.Getenv("HOST_ROOT")
+	m, err := stats.ReadMemStats(ctx, stats.WithRootDir(root))
+	if err != nil {
+		return nil, err
+	}
+	// Avoid 0/0 when no user pages are reported.
+	u := 0.0
+	if m.UserPages.Avail != 0 {
+		u = float64(m.UserPages.Used) / float64(m.UserPages.Avail) * 100.0
+	}
+	// NOTE(review): Available and Free are both computed from
+	// UserPages.Free() — confirm Available should not differ on Plan 9.
+	return &VirtualMemoryStat{
+		Total:       uint64(m.Total),
+		Available:   uint64(m.UserPages.Free() * m.PageSize),
+		Used:        uint64(m.UserPages.Used * m.PageSize),
+		UsedPercent: u,
+		Free:        uint64(m.UserPages.Free() * m.PageSize),
+
+		SwapTotal: uint64(m.SwapPages.Avail * m.PageSize),
+		SwapFree:  uint64(m.SwapPages.Free() * m.PageSize),
+	}, nil
+}
+
+// SwapDevices is not implemented on Plan 9.
+func SwapDevices() ([]*SwapDevice, error) {
+	return SwapDevicesWithContext(context.Background())
+}
+
+// SwapDevicesWithContext always returns common.ErrNotImplementedError.
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+	return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go
new file mode 100644
index 000000000..88f05f65d
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go
@@ -0,0 +1,186 @@
+//go:build solaris
+// +build solaris
+
+package mem
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// VirtualMemory for Solaris is a minimal implementation which only returns
+// what Nomad needs. It does take into account global vs zone, however.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+// VirtualMemoryWithContext fills only Total: from prtconf in the global
+// zone, or from the zone's memory_cap kstat otherwise. ctx is not consulted.
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	result := &VirtualMemoryStat{}
+
+	zoneName, err := zoneName()
+	if err != nil {
+		return nil, err
+	}
+
+	// NOTE(review): the local "cap" shadows the builtin cap(); harmless
+	// here but worth renaming upstream.
+	if zoneName == "global" {
+		cap, err := globalZoneMemoryCapacity()
+		if err != nil {
+			return nil, err
+		}
+		result.Total = cap
+	} else {
+		cap, err := nonGlobalZoneMemoryCapacity()
+		if err != nil {
+			return nil, err
+		}
+		result.Total = cap
+	}
+
+	return result, nil
+}
+
+// SwapMemory is not implemented on Solaris.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// SwapMemoryWithContext always returns common.ErrNotImplementedError.
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// zoneName returns the trimmed output of zonename(1), identifying the
+// Solaris zone this process runs in ("global" for the global zone).
+func zoneName() (string, error) {
+	ctx := context.Background()
+	out, err := invoke.CommandWithContext(ctx, "zonename")
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(string(out)), nil
+}
+
+// globalZoneMemoryCapacityMatch extracts the "Memory size: N Megabytes"
+// line from prtconf output.
+var globalZoneMemoryCapacityMatch = regexp.MustCompile(`[Mm]emory size: (\d+) Megabytes`)
+
+// globalZoneMemoryCapacity returns the global zone's physical memory in
+// bytes, parsed from prtconf; it requires exactly one matching line.
+func globalZoneMemoryCapacity() (uint64, error) {
+	ctx := context.Background()
+	out, err := invoke.CommandWithContext(ctx, "prtconf")
+	if err != nil {
+		return 0, err
+	}
+
+	match := globalZoneMemoryCapacityMatch.FindAllStringSubmatch(string(out), -1)
+	if len(match) != 1 {
+		return 0, errors.New("memory size not contained in output of prtconf")
+	}
+
+	totalMB, err := strconv.ParseUint(match[0][1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	// prtconf reports megabytes; convert to bytes.
+	return totalMB * 1024 * 1024, nil
+}
+
+// kstatMatch splits a "key<TAB>value" pair from kstat -p output.
+var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`)
+
+// nonGlobalZoneMemoryCapacity returns this zone's memory cap in bytes from
+// the memory_cap:*:*:physcap kstat; it requires exactly one matching stat.
+func nonGlobalZoneMemoryCapacity() (uint64, error) {
+	ctx := context.Background()
+	out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap")
+	if err != nil {
+		return 0, err
+	}
+
+	kstats := kstatMatch.FindAllStringSubmatch(string(out), -1)
+	if len(kstats) != 1 {
+		return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats))
+	}
+
+	// physcap is already in bytes.
+	memSizeBytes, err := strconv.ParseUint(kstats[0][2], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return memSizeBytes, nil
+}
+
+// swapCommand lists swap areas on Solaris.
+const swapCommand = "swap"
+
+// The blockSize as reported by `swap -l`. See https://docs.oracle.com/cd/E23824_01/html/821-1459/fsswap-52195.html
+const blockSize = 512
+
+// swapctl column indexes
+const (
+	nameCol = 0
+	// devCol = 1
+	// swaploCol = 2
+	totalBlocksCol = 3
+	freeBlocksCol  = 4
+)
+
+// SwapDevices lists swap areas via `swap -l`.
+func SwapDevices() ([]*SwapDevice, error) {
+	return SwapDevicesWithContext(context.Background())
+}
+
+// SwapDevicesWithContext runs `swap -l` and parses its table output.
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+	output, err := invoke.CommandWithContext(ctx, swapCommand, "-l")
+	if err != nil {
+		return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err)
+	}
+
+	return parseSwapsCommandOutput(string(output))
+}
+
+// parseSwapsCommandOutput parses `swap -l` output: a header line
+// ("swapfile dev swaplo blocks free") followed by one row per swap area.
+// Block counts are converted to bytes with the documented 512-byte block.
+//
+// Fix: the bounds checks were off by one — fields[freeBlocksCol] is
+// accessed, so a row needs at least freeBlocksCol+1 fields; the previous
+// "len(fields) < freeBlocksCol" allowed an out-of-range access on a row
+// with exactly freeBlocksCol fields.
+func parseSwapsCommandOutput(output string) ([]*SwapDevice, error) {
+	lines := strings.Split(output, "\n")
+	if len(lines) == 0 {
+		return nil, fmt.Errorf("could not parse output of %q: no lines in %q", swapCommand, output)
+	}
+
+	// Check header headerFields are as expected.
+	headerFields := strings.Fields(lines[0])
+	if len(headerFields) <= freeBlocksCol {
+		return nil, fmt.Errorf("couldn't parse %q: too few fields in header %q", swapCommand, lines[0])
+	}
+	if headerFields[nameCol] != "swapfile" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[nameCol], "swapfile")
+	}
+	if headerFields[totalBlocksCol] != "blocks" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[totalBlocksCol], "blocks")
+	}
+	if headerFields[freeBlocksCol] != "free" {
+		return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[freeBlocksCol], "free")
+	}
+
+	var swapDevices []*SwapDevice
+	for _, line := range lines[1:] {
+		if line == "" {
+			continue // the terminal line is typically empty
+		}
+		fields := strings.Fields(line)
+		if len(fields) <= freeBlocksCol { // need indexes 0..freeBlocksCol
+			return nil, fmt.Errorf("couldn't parse %q: too few fields", swapCommand)
+		}
+
+		totalBlocks, err := strconv.ParseUint(fields[totalBlocksCol], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapCommand, err)
+		}
+
+		freeBlocks, err := strconv.ParseUint(fields[freeBlocksCol], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapCommand, err)
+		}
+
+		swapDevices = append(swapDevices, &SwapDevice{
+			Name:      fields[nameCol],
+			UsedBytes: (totalBlocks - freeBlocks) * blockSize,
+			FreeBytes: freeBlocks * blockSize,
+		})
+	}
+
+	return swapDevices, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go
new file mode 100644
index 000000000..8c7fb1a13
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go
@@ -0,0 +1,166 @@
+//go:build windows
+// +build windows
+
+package mem
+
+import (
+ "context"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/windows"
+)
+
+// Win32 procedures resolved lazily from psapi.dll / kernel32.dll.
+var (
+	procEnumPageFilesW       = common.ModPsapi.NewProc("EnumPageFilesW")
+	procGetNativeSystemInfo  = common.Modkernel32.NewProc("GetNativeSystemInfo")
+	procGetPerformanceInfo   = common.ModPsapi.NewProc("GetPerformanceInfo")
+	procGlobalMemoryStatusEx = common.Modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// memoryStatusEx mirrors the Win32 MEMORYSTATUSEX structure; field order
+// and sizes must match the ABI exactly.
+type memoryStatusEx struct {
+	cbSize                  uint32
+	dwMemoryLoad            uint32
+	ullTotalPhys            uint64 // in bytes
+	ullAvailPhys            uint64
+	ullTotalPageFile        uint64
+	ullAvailPageFile        uint64
+	ullTotalVirtual         uint64
+	ullAvailVirtual         uint64
+	ullAvailExtendedVirtual uint64
+}
+
+// VirtualMemory returns memory usage; see VirtualMemoryWithContext.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+	return VirtualMemoryWithContext(context.Background())
+}
+
+// VirtualMemoryWithContext queries GlobalMemoryStatusEx; ctx is not
+// consulted. UsedPercent comes directly from dwMemoryLoad.
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+	var memInfo memoryStatusEx
+	// The API requires cbSize to be set before the call.
+	memInfo.cbSize = uint32(unsafe.Sizeof(memInfo))
+	mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo)))
+	if mem == 0 {
+		// NOTE(review): the third return value of Call carries the last
+		// error; windows.GetLastError() here may have been cleared — confirm.
+		return nil, windows.GetLastError()
+	}
+
+	ret := &VirtualMemoryStat{
+		Total:       memInfo.ullTotalPhys,
+		Available:   memInfo.ullAvailPhys,
+		Free:        memInfo.ullAvailPhys,
+		UsedPercent: float64(memInfo.dwMemoryLoad),
+	}
+
+	ret.Used = ret.Total - ret.Available
+	return ret, nil
+}
+
+// performanceInformation mirrors the Win32 PERFORMANCE_INFORMATION
+// structure; field order and sizes must match the ABI exactly.
+type performanceInformation struct {
+	cb                uint32
+	commitTotal       uint64
+	commitLimit       uint64
+	commitPeak        uint64
+	physicalTotal     uint64
+	physicalAvailable uint64
+	systemCache       uint64
+	kernelTotal       uint64
+	kernelPaged       uint64
+	kernelNonpaged    uint64
+	pageSize          uint64
+	handleCount       uint32
+	processCount      uint32
+	threadCount       uint32
+}
+
+// SwapMemory returns commit-charge usage; see SwapMemoryWithContext.
+func SwapMemory() (*SwapMemoryStat, error) {
+	return SwapMemoryWithContext(context.Background())
+}
+
+// SwapMemoryWithContext reports the system commit charge from
+// GetPerformanceInfo, converted from pages to bytes; ctx is not consulted.
+func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
+	var perfInfo performanceInformation
+	// The API requires cb to be set before the call.
+	perfInfo.cb = uint32(unsafe.Sizeof(perfInfo))
+	mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb))
+	if mem == 0 {
+		return nil, windows.GetLastError()
+	}
+	tot := perfInfo.commitLimit * perfInfo.pageSize
+	used := perfInfo.commitTotal * perfInfo.pageSize
+	free := tot - used
+	// Avoid 0/0 when the commit limit is reported as zero.
+	var usedPercent float64
+	if tot == 0 {
+		usedPercent = 0
+	} else {
+		usedPercent = float64(used) / float64(tot) * 100
+	}
+	ret := &SwapMemoryStat{
+		Total:       tot,
+		Used:        used,
+		Free:        free,
+		UsedPercent: usedPercent,
+	}
+
+	return ret, nil
+}
+
+// pageSize caches the native page size; it is resolved at most once in
+// SwapDevicesWithContext via pageSizeOnce.
+var (
+	pageSize     uint64
+	pageSizeOnce sync.Once
+)
+
+// systemInfo mirrors the Win32 SYSTEM_INFO structure; only dwPageSize is
+// read here, but the full layout is required for the ABI.
+type systemInfo struct {
+	wProcessorArchitecture      uint16
+	wReserved                   uint16
+	dwPageSize                  uint32
+	lpMinimumApplicationAddress uintptr
+	lpMaximumApplicationAddress uintptr
+	dwActiveProcessorMask       uintptr
+	dwNumberOfProcessors        uint32
+	dwProcessorType             uint32
+	dwAllocationGranularity     uint32
+	wProcessorLevel             uint16
+	wProcessorRevision          uint16
+}
+
+// system type as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-enum_page_file_information
+type enumPageFileInformation struct {
+	cb         uint32
+	reserved   uint32
+	totalSize  uint64
+	totalInUse uint64
+	peakUsage  uint64
+}
+
+// SwapDevices lists the system page files; see SwapDevicesWithContext.
+func SwapDevices() ([]*SwapDevice, error) {
+	return SwapDevicesWithContext(context.Background())
+}
+
+// SwapDevicesWithContext enumerates page files via EnumPageFilesW. The
+// native page size is cached once beforehand so the callback can convert
+// page counts to bytes; ctx is not consulted.
+func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
+	pageSizeOnce.Do(func() {
+		var sysInfo systemInfo
+		procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sysInfo)))
+		pageSize = uint64(sysInfo.dwPageSize)
+	})
+
+	// the following system call invokes the supplied callback function once for each page file before returning
+	// see https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumpagefilesw
+	var swapDevices []*SwapDevice
+	result, _, _ := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices)))
+	if result == 0 {
+		return nil, windows.GetLastError()
+	}
+
+	return swapDevices, nil
+}
+
+// system callback as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/nc-psapi-penum_page_file_callbackw
+func pEnumPageFileCallbackW(swapDevices *[]*SwapDevice, enumPageFileInfo *enumPageFileInformation, lpFilenamePtr *[syscall.MAX_LONG_PATH]uint16) *bool {
+	*swapDevices = append(*swapDevices, &SwapDevice{
+		Name:      syscall.UTF16ToString((*lpFilenamePtr)[:]),
+		UsedBytes: enumPageFileInfo.totalInUse * pageSize,
+		FreeBytes: (enumPageFileInfo.totalSize - enumPageFileInfo.totalInUse) * pageSize,
+	})
+
+	// return true to continue enumerating page files
+	ret := true
+	return &ret
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net.go
new file mode 100644
index 000000000..0f3a62f39
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net.go
@@ -0,0 +1,273 @@
+package net
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// invoke is the command invoker used by this package; tests may swap it out.
+var invoke common.Invoker = common.Invoke{}
+
+// IOCountersStat holds network I/O counters for a single interface (or the
+// aggregated pseudo-interface "all" produced by getIOCountersAll).
+type IOCountersStat struct {
+	Name        string `json:"name"`        // interface name
+	BytesSent   uint64 `json:"bytesSent"`   // number of bytes sent
+	BytesRecv   uint64 `json:"bytesRecv"`   // number of bytes received
+	PacketsSent uint64 `json:"packetsSent"` // number of packets sent
+	PacketsRecv uint64 `json:"packetsRecv"` // number of packets received
+	Errin       uint64 `json:"errin"`       // total number of errors while receiving
+	Errout      uint64 `json:"errout"`      // total number of errors while sending
+	Dropin      uint64 `json:"dropin"`      // total number of incoming packets which were dropped
+	Dropout     uint64 `json:"dropout"`     // total number of outgoing packets which were dropped (always 0 on OSX and BSD)
+	Fifoin      uint64 `json:"fifoin"`      // total number of FIFO buffers errors while receiving
+	Fifoout     uint64 `json:"fifoout"`     // total number of FIFO buffers errors while sending
+}
+
+// Addr is implemented for compatibility with psutil.
+type Addr struct {
+	IP   string `json:"ip"`
+	Port uint32 `json:"port"`
+}
+
+// ConnectionStat describes a single network connection (socket).
+type ConnectionStat struct {
+	Fd     uint32  `json:"fd"`
+	Family uint32  `json:"family"`
+	Type   uint32  `json:"type"`
+	Laddr  Addr    `json:"localaddr"`
+	Raddr  Addr    `json:"remoteaddr"`
+	Status string  `json:"status"`
+	Uids   []int32 `json:"uids"`
+	Pid    int32   `json:"pid"`
+}
+
+// ProtoCountersStat holds system-wide stats for one network protocol.
+type ProtoCountersStat struct {
+	Protocol string           `json:"protocol"`
+	Stats    map[string]int64 `json:"stats"`
+}
+
+// InterfaceAddr represents a single interface address.
+type InterfaceAddr struct {
+	Addr string `json:"addr"`
+}
+
+// InterfaceAddrList is a list of InterfaceAddr
+type InterfaceAddrList []InterfaceAddr
+
+// InterfaceStat describes one network interface and its addresses.
+type InterfaceStat struct {
+	Index        int               `json:"index"`
+	MTU          int               `json:"mtu"`          // maximum transmission unit
+	Name         string            `json:"name"`         // e.g., "en0", "lo0", "eth0.100"
+	HardwareAddr string            `json:"hardwareAddr"` // IEEE MAC-48, EUI-48 and EUI-64 form
+	Flags        []string          `json:"flags"`        // e.g., FlagUp, FlagLoopback, FlagMulticast
+	Addrs        InterfaceAddrList `json:"addrs"`
+}
+
+// InterfaceStatList is a list of InterfaceStat
+type InterfaceStatList []InterfaceStat
+
+// FilterStat holds conntrack table occupancy (current count vs. maximum).
+type FilterStat struct {
+	ConnTrackCount int64 `json:"connTrackCount"`
+	ConnTrackMax   int64 `json:"connTrackMax"`
+}
+
+// ConntrackStat has conntrack summary info
+type ConntrackStat struct {
+	Entries       uint32 `json:"entries"`       // Number of entries in the conntrack table
+	Searched      uint32 `json:"searched"`      // Number of conntrack table lookups performed
+	Found         uint32 `json:"found"`         // Number of searched entries which were successful
+	New           uint32 `json:"new"`           // Number of entries added which were not expected before
+	Invalid       uint32 `json:"invalid"`       // Number of packets seen which can not be tracked
+	Ignore        uint32 `json:"ignore"`        // Packets seen which are already connected to an entry
+	Delete        uint32 `json:"delete"`        // Number of entries which were removed
+	DeleteList    uint32 `json:"deleteList"`    // Number of entries which were put to dying list
+	Insert        uint32 `json:"insert"`        // Number of entries inserted into the list
+	InsertFailed  uint32 `json:"insertFailed"`  // # insertion attempted but failed (same entry exists)
+	Drop          uint32 `json:"drop"`          // Number of packets dropped due to conntrack failure.
+	EarlyDrop     uint32 `json:"earlyDrop"`     // Dropped entries to make room for new ones, if maxsize reached
+	IcmpError     uint32 `json:"icmpError"`     // Subset of invalid. Packets that can't be tracked d/t error
+	ExpectNew     uint32 `json:"expectNew"`     // Entries added after an expectation was already present
+	ExpectCreate  uint32 `json:"expectCreate"`  // Expectations added
+	ExpectDelete  uint32 `json:"expectDelete"`  // Expectations deleted
+	SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes
+}
+
+// NewConntrackStat builds a ConntrackStat from positional counter values, in
+// the same field order as the struct declaration.
+func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat {
+	return &ConntrackStat{
+		Entries:       e,
+		Searched:      s,
+		Found:         f,
+		New:           n,
+		Invalid:       inv,
+		Ignore:        ign,
+		Delete:        del,
+		DeleteList:    dlst,
+		Insert:        ins,
+		InsertFailed:  insfail,
+		Drop:          drop,
+		EarlyDrop:     edrop,
+		IcmpError:     ie,
+		ExpectNew:     en,
+		ExpectCreate:  ec,
+		ExpectDelete:  ed,
+		SearchRestart: sr,
+	}
+}
+
+// ConntrackStatList is a mutable collection of per-CPU ConntrackStat entries.
+type ConntrackStatList struct {
+	items []*ConntrackStat
+}
+
+// NewConntrackStatList returns an empty ConntrackStatList.
+func NewConntrackStatList() *ConntrackStatList {
+	return &ConntrackStatList{
+		items: []*ConntrackStat{},
+	}
+}
+
+// Append adds one entry to the list.
+func (l *ConntrackStatList) Append(c *ConntrackStat) {
+	l.items = append(l.items, c)
+}
+
+// Items returns a copy of the entries as values (one per CPU).
+func (l *ConntrackStatList) Items() []ConntrackStat {
+	items := make([]ConntrackStat, len(l.items))
+	for i, el := range l.items {
+		items[i] = *el
+	}
+	return items
+}
+
+// Summary returns a single-element list with totals from all list items.
+func (l *ConntrackStatList) Summary() []ConntrackStat {
+	summary := NewConntrackStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	for _, cs := range l.items {
+		summary.Entries += cs.Entries
+		summary.Searched += cs.Searched
+		summary.Found += cs.Found
+		summary.New += cs.New
+		summary.Invalid += cs.Invalid
+		summary.Ignore += cs.Ignore
+		summary.Delete += cs.Delete
+		summary.DeleteList += cs.DeleteList
+		summary.Insert += cs.Insert
+		summary.InsertFailed += cs.InsertFailed
+		summary.Drop += cs.Drop
+		summary.EarlyDrop += cs.EarlyDrop
+		summary.IcmpError += cs.IcmpError
+		summary.ExpectNew += cs.ExpectNew
+		summary.ExpectCreate += cs.ExpectCreate
+		summary.ExpectDelete += cs.ExpectDelete
+		summary.SearchRestart += cs.SearchRestart
+	}
+	return []ConntrackStat{*summary}
+}
+
+// The String methods below render each stat type as its JSON encoding,
+// matching psutil's string representations. Marshal errors are ignored
+// because these types contain only JSON-serializable fields.
+func (n IOCountersStat) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+func (n ConnectionStat) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+func (n ProtoCountersStat) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+func (a Addr) String() string {
+	s, _ := json.Marshal(a)
+	return string(s)
+}
+
+func (n InterfaceStat) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+func (l InterfaceStatList) String() string {
+	s, _ := json.Marshal(l)
+	return string(s)
+}
+
+func (n InterfaceAddr) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+func (n ConntrackStat) String() string {
+	s, _ := json.Marshal(n)
+	return string(s)
+}
+
+// Interfaces returns stats for all network interfaces on the host.
+func Interfaces() (InterfaceStatList, error) {
+	return InterfacesWithContext(context.Background())
+}
+
+// InterfacesWithContext builds an InterfaceStatList from the standard
+// library's net.Interfaces, translating the Flags bitmask into psutil-style
+// string flags. Address-lookup errors for an interface are ignored: the
+// interface is still reported, just with no Addrs.
+func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) {
+	is, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+	ret := make(InterfaceStatList, 0, len(is))
+	for _, ifi := range is {
+
+		// Translate the net.Flags bitmask into string form.
+		var flags []string
+		if ifi.Flags&net.FlagUp != 0 {
+			flags = append(flags, "up")
+		}
+		if ifi.Flags&net.FlagBroadcast != 0 {
+			flags = append(flags, "broadcast")
+		}
+		if ifi.Flags&net.FlagLoopback != 0 {
+			flags = append(flags, "loopback")
+		}
+		if ifi.Flags&net.FlagPointToPoint != 0 {
+			flags = append(flags, "pointtopoint")
+		}
+		if ifi.Flags&net.FlagMulticast != 0 {
+			flags = append(flags, "multicast")
+		}
+
+		r := InterfaceStat{
+			Index:        ifi.Index,
+			Name:         ifi.Name,
+			MTU:          ifi.MTU,
+			HardwareAddr: ifi.HardwareAddr.String(),
+			Flags:        flags,
+		}
+		addrs, err := ifi.Addrs()
+		if err == nil {
+			r.Addrs = make(InterfaceAddrList, 0, len(addrs))
+			for _, addr := range addrs {
+				r.Addrs = append(r.Addrs, InterfaceAddr{
+					Addr: addr.String(),
+				})
+			}
+
+		}
+		ret = append(ret, r)
+	}
+
+	return ret, nil
+}
+
+// getIOCountersAll sums per-NIC counters into a single entry named "all".
+// Fifoin/Fifoout are not aggregated here; they stay zero in the summary.
+func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) {
+	r := IOCountersStat{
+		Name: "all",
+	}
+	for _, nic := range n {
+		r.BytesRecv += nic.BytesRecv
+		r.PacketsRecv += nic.PacketsRecv
+		r.Errin += nic.Errin
+		r.Dropin += nic.Dropin
+		r.BytesSent += nic.BytesSent
+		r.PacketsSent += nic.PacketsSent
+		r.Errout += nic.Errout
+		r.Dropout += nic.Dropout
+	}
+
+	return []IOCountersStat{r}, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go
new file mode 100644
index 000000000..81feaa8d7
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go
@@ -0,0 +1,330 @@
+//go:build aix
+// +build aix
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// IOCounters returns network I/O counters; see IOCountersWithContext.
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+	return IOCountersWithContext(context.Background(), pernic)
+}
+
+// IOCountersByFile exists just for compatibility with Linux.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+// IOCountersByFileWithContext ignores filename on AIX and defers to IOCounters.
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCounters(pernic)
+}
+
+// FilterCounters is not implemented on AIX.
+func FilterCounters() ([]FilterStat, error) {
+	return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// ConntrackStats is not implemented on AIX.
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+	return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// ProtoCounters is not implemented on AIX.
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+	return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// parseNetstatNetLine parses one inet line of AIX `netstat -na` output into a
+// ConnectionStat. Fd and Pid are not available from netstat and stay zero.
+// The optional 6th field (TCP state) becomes Status when present.
+func parseNetstatNetLine(line string) (ConnectionStat, error) {
+	f := strings.Fields(line)
+	if len(f) < 5 {
+		return ConnectionStat{}, fmt.Errorf("wrong line,%s", line)
+	}
+
+	// Map the protocol column to socket type and address family.
+	var netType, netFamily uint32
+	switch f[0] {
+	case "tcp", "tcp4":
+		netType = syscall.SOCK_STREAM
+		netFamily = syscall.AF_INET
+	case "udp", "udp4":
+		netType = syscall.SOCK_DGRAM
+		netFamily = syscall.AF_INET
+	case "tcp6":
+		netType = syscall.SOCK_STREAM
+		netFamily = syscall.AF_INET6
+	case "udp6":
+		netType = syscall.SOCK_DGRAM
+		netFamily = syscall.AF_INET6
+	default:
+		return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0])
+	}
+
+	// f[3]/f[4] are the local and remote address columns.
+	laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily)
+	if err != nil {
+		return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4])
+	}
+
+	n := ConnectionStat{
+		Fd:     uint32(0), // not supported
+		Family: uint32(netFamily),
+		Type:   uint32(netType),
+		Laddr:  laddr,
+		Raddr:  raddr,
+		Pid:    int32(0), // not supported
+	}
+	if len(f) == 6 {
+		n.Status = f[5]
+	}
+
+	return n, nil
+}
+
+// portMatch splits "host.port" on the last dot (netstat's BSD-style format).
+var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`)
+
+// This function only works for netstat returning addresses with a "."
+// before the port (0.0.0.0.22 instead of 0.0.0.0:22).
+// A "*" host is rewritten to the wildcard address of the given family; a
+// remote of "*.*" means no remote address and leaves raddr zero.
+// NOTE(review): a parse error on the local address is only returned after
+// the remote address has also been parsed (the laddr error is carried in
+// err and returned at the end). This matches the vendored upstream source.
+func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) {
+	parse := func(l string) (Addr, error) {
+		matches := portMatch.FindStringSubmatch(l)
+		if matches == nil {
+			return Addr{}, fmt.Errorf("wrong addr, %s", l)
+		}
+		host := matches[1]
+		port := matches[2]
+		if host == "*" {
+			switch family {
+			case syscall.AF_INET:
+				host = "0.0.0.0"
+			case syscall.AF_INET6:
+				host = "::"
+			default:
+				return Addr{}, fmt.Errorf("unknown family, %d", family)
+			}
+		}
+		lport, err := strconv.Atoi(port)
+		if err != nil {
+			return Addr{}, err
+		}
+		return Addr{IP: host, Port: uint32(lport)}, nil
+	}
+
+	laddr, err = parse(local)
+	if remote != "*.*" { // remote addr exists
+		raddr, err = parse(remote)
+		if err != nil {
+			return laddr, raddr, err
+		}
+	}
+
+	return laddr, raddr, err
+}
+
+// parseNetstatUnixLine parses the fields of one unix-domain-socket line from
+// AIX `netstat -na`. The socket path (field 9), when present, is stored in
+// Laddr.IP; Fd and Pid are not available and stay zero.
+func parseNetstatUnixLine(f []string) (ConnectionStat, error) {
+	if len(f) < 8 {
+		return ConnectionStat{}, fmt.Errorf("wrong number of fields: expected >=8 got %d", len(f))
+	}
+
+	var netType uint32
+
+	switch f[1] {
+	case "dgram":
+		netType = syscall.SOCK_DGRAM
+	case "stream":
+		netType = syscall.SOCK_STREAM
+	default:
+		return ConnectionStat{}, fmt.Errorf("unknown type: %s", f[1])
+	}
+
+	// Some Unix sockets don't have any address associated.
+	addr := ""
+	if len(f) == 9 {
+		addr = f[8]
+	}
+
+	c := ConnectionStat{
+		Fd:     uint32(0), // not supported
+		Family: uint32(syscall.AF_UNIX),
+		Type:   uint32(netType),
+		Laddr: Addr{
+			IP: addr,
+		},
+		Status: "NONE",
+		Pid:    int32(0), // not supported
+	}
+
+	return c, nil
+}
+
+// hasCorrectInetProto returns true if proto (a netstat protocol column such
+// as "tcp4" or "udp6") matches the requested kind filter.
+// Only for inet lines; unix lines are filtered elsewhere.
+func hasCorrectInetProto(kind, proto string) bool {
+	switch kind {
+	case "all", "inet":
+		return true
+	case "unix":
+		return false
+	case "inet4":
+		return !strings.HasSuffix(proto, "6")
+	case "inet6":
+		return strings.HasSuffix(proto, "6")
+	case "tcp":
+		return proto == "tcp" || proto == "tcp4" || proto == "tcp6"
+	case "tcp4":
+		return proto == "tcp" || proto == "tcp4"
+	case "tcp6":
+		return proto == "tcp6"
+	case "udp":
+		return proto == "udp" || proto == "udp4" || proto == "udp6"
+	case "udp4":
+		return proto == "udp" || proto == "udp4"
+	case "udp6":
+		return proto == "udp6"
+	}
+	// Unknown kinds match nothing.
+	return false
+}
+
+// parseNetstatA parses full `netstat -na` output into ConnectionStats,
+// keeping only connections matching kind (see hasCorrectInetProto).
+// Lines whose first field starts with "f1" are treated as unix-domain
+// sockets (presumably an AIX kernel address prefix — TODO confirm against
+// real AIX output); tcp*/udp* lines are inet; everything else is a header.
+func parseNetstatA(output string, kind string) ([]ConnectionStat, error) {
+	var ret []ConnectionStat
+	lines := strings.Split(string(output), "\n")
+
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		if len(fields) < 1 {
+			continue
+		}
+
+		if strings.HasPrefix(fields[0], "f1") {
+			// Unix lines
+			if len(fields) < 2 {
+				// every unix connection has two lines; skip the short one
+				continue
+			}
+
+			c, err := parseNetstatUnixLine(fields)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse Unix Address (%s): %s", line, err)
+			}
+
+			ret = append(ret, c)
+
+		} else if strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp") {
+			// Inet lines
+			if !hasCorrectInetProto(kind, fields[0]) {
+				continue
+			}
+
+			// On AIX, netstat display some connections with "*.*" as local addresses
+			// Skip them as they aren't real connections.
+			if fields[3] == "*.*" {
+				continue
+			}
+
+			c, err := parseNetstatNetLine(line)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse Inet Address (%s): %s", line, err)
+			}
+
+			ret = append(ret, c)
+		} else {
+			// Header lines
+			continue
+		}
+	}
+
+	return ret, nil
+}
+
+// Connections returns network connections of the given kind; see
+// ConnectionsWithContext.
+func Connections(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithContext(context.Background(), kind)
+}
+
+// ConnectionsWithContext shells out to `netstat -na` (narrowed with -finet or
+// -funix where possible) and parses the result. An empty or unrecognized
+// kind is treated as "all".
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	args := []string{"-na"}
+	switch strings.ToLower(kind) {
+	default:
+		fallthrough
+	case "":
+		kind = "all"
+	case "all":
+		// nothing to add
+	case "inet", "inet4", "inet6":
+		args = append(args, "-finet")
+	case "tcp", "tcp4", "tcp6":
+		args = append(args, "-finet")
+	case "udp", "udp4", "udp6":
+		args = append(args, "-finet")
+	case "unix":
+		args = append(args, "-funix")
+	}
+
+	out, err := invoke.CommandWithContext(ctx, "netstat", args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// kind is still used for fine-grained filtering of the parsed lines.
+	ret, err := parseNetstatA(string(out), kind)
+	if err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+// ConnectionsMax is not implemented on AIX.
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithContext(context.Background(), kind, max)
+}
+
+func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+// Return a list of network connections opened, omitting `Uids`.
+// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be
+// removed from the API in the future.
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithoutUidsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0)
+}
+
+func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max)
+}
+
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0)
+}
+
+func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max)
+}
+
+// All WithoutUids variants funnel here; not implemented on AIX.
+func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return []ConnectionStat{}, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go
new file mode 100644
index 000000000..8cf8c9142
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go
@@ -0,0 +1,36 @@
+//go:build aix && cgo
+// +build aix,cgo
+
+package net
+
+import (
+ "context"
+
+ "github.com/power-devops/perfstat"
+)
+
+// IOCountersWithContext reads per-interface I/O counters from AIX perfstat.
+// When pernic is false a single aggregated "all" entry is returned instead.
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	ifs, err := perfstat.NetIfaceStat()
+	if err != nil {
+		return nil, err
+	}
+
+	iocounters := make([]IOCountersStat, 0, len(ifs))
+	for _, netif := range ifs {
+		n := IOCountersStat{
+			Name:        netif.Name,
+			BytesSent:   uint64(netif.OBytes),
+			BytesRecv:   uint64(netif.IBytes),
+			PacketsSent: uint64(netif.OPackets),
+			PacketsRecv: uint64(netif.IPackets),
+			// NOTE(review): Errin is filled from OErrors and Errout from
+			// IErrors, which looks swapped relative to the field meanings;
+			// this matches the vendored upstream source, so confirm with
+			// upstream gopsutil before changing.
+			Errin:   uint64(netif.OErrors),
+			Errout:  uint64(netif.IErrors),
+			Dropout: uint64(netif.XmitDrops),
+		}
+		iocounters = append(iocounters, n)
+	}
+	if pernic == false {
+		return getIOCountersAll(iocounters)
+	}
+	return iocounters, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go
new file mode 100644
index 000000000..e3fce9021
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go
@@ -0,0 +1,95 @@
+//go:build aix && !cgo
+// +build aix,!cgo
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// parseNetstatI parses AIX `netstat -idn` output into per-interface counters.
+// Duplicate interface names (one row per address) are collapsed to the first
+// occurrence. Byte counters are not present in this output and stay zero.
+// NOTE(review): the header check indexes strings.Fields(lines[0])[0] without
+// checking that the first line is non-empty; an empty first line would panic.
+// Matches the vendored upstream source.
+func parseNetstatI(output string) ([]IOCountersStat, error) {
+	lines := strings.Split(string(output), "\n")
+	ret := make([]IOCountersStat, 0, len(lines)-1)
+	exists := make([]string, 0, len(ret))
+
+	// Check first line is header
+	if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" {
+		return nil, fmt.Errorf("not a 'netstat -i' output")
+	}
+
+	for _, line := range lines[1:] {
+		values := strings.Fields(line)
+		if len(values) < 1 || values[0] == "Name" {
+			continue
+		}
+		if common.StringsHas(exists, values[0]) {
+			// skip interfaces we have already seen
+			continue
+		}
+		exists = append(exists, values[0])
+
+		if len(values) < 9 {
+			continue
+		}
+
+		base := 1
+		// sometimes Address is omitted
+		if len(values) < 10 {
+			base = 0
+		}
+
+		parsed := make([]uint64, 0, 5)
+		vv := []string{
+			values[base+3], // Ipkts == PacketsRecv
+			values[base+4], // Ierrs == Errin
+			values[base+5], // Opkts == PacketsSent
+			values[base+6], // Oerrs == Errout
+			values[base+8], // Drops == Dropout
+		}
+
+		for _, target := range vv {
+			// "-" means the counter is unavailable for this row.
+			if target == "-" {
+				parsed = append(parsed, 0)
+				continue
+			}
+
+			t, err := strconv.ParseUint(target, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			parsed = append(parsed, t)
+		}
+
+		n := IOCountersStat{
+			Name:        values[0],
+			PacketsRecv: parsed[0],
+			Errin:       parsed[1],
+			PacketsSent: parsed[2],
+			Errout:      parsed[3],
+			Dropout:     parsed[4],
+		}
+		ret = append(ret, n)
+	}
+	return ret, nil
+}
+
+// IOCountersWithContext shells out to `netstat -idn` and parses the result.
+// When pernic is false a single aggregated "all" entry is returned instead.
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	out, err := invoke.CommandWithContext(ctx, "netstat", "-idn")
+	if err != nil {
+		return nil, err
+	}
+
+	iocounters, err := parseNetstatI(string(out))
+	if err != nil {
+		return nil, err
+	}
+	if pernic == false {
+		return getIOCountersAll(iocounters)
+	}
+	return iocounters, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
new file mode 100644
index 000000000..1c8d4f4e3
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
@@ -0,0 +1,291 @@
+//go:build darwin
+// +build darwin
+
+package net
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+var (
+	errNetstatHeader = errors.New("Can't parse header of netstat output")
+	// netstatLinkRegexp extracts the numeric link ID from the Network column.
+	// NOTE(review): this pattern looks truncated — upstream gopsutil uses
+	// `^<Link#(\d+)>$` here; as written (`^ $`) it can never produce the
+	// 2-element submatch parseNetstatLine expects, so no line would ever get
+	// a link ID. Verify against the upstream source before relying on it.
+	netstatLinkRegexp = regexp.MustCompile(`^ $`)
+)
+
+// endOfLine is the line separator used to split netstat output.
+const endOfLine = "\n"
+
+// parseNetstatLine parses one line of `netstat -ibdnW` output on Darwin.
+// It returns the parsed counters, and the interface's link ID when the
+// Network column matches netstatLinkRegexp (nil otherwise — callers use a
+// non-nil linkID to identify the canonical row for an interface).
+// Returns errNetstatHeader for the header row.
+// NOTE(review): columns[0] and columns[2] are indexed without a length
+// guard, so an empty or two-field line would panic; matches upstream.
+func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err error) {
+	var (
+		numericValue uint64
+		columns      = strings.Fields(line)
+	)
+
+	if columns[0] == "Name" {
+		err = errNetstatHeader
+		return
+	}
+
+	// try to extract the numeric link ID from the Network column
+	if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 {
+		numericValue, err = strconv.ParseUint(subMatch[1], 10, 64)
+		if err != nil {
+			return
+		}
+		linkIDUint := uint(numericValue)
+		linkID = &linkIDUint
+	}
+
+	base := 1
+	numberColumns := len(columns)
+	// sometimes Address is omitted
+	if numberColumns < 12 {
+		base = 0
+	}
+	if numberColumns < 11 || numberColumns > 13 {
+		err = fmt.Errorf("Line %q do have an invalid number of columns %d", line, numberColumns)
+		return
+	}
+
+	parsed := make([]uint64, 0, 7)
+	vv := []string{
+		columns[base+3], // Ipkts == PacketsRecv
+		columns[base+4], // Ierrs == Errin
+		columns[base+5], // Ibytes == BytesRecv
+		columns[base+6], // Opkts == PacketsSent
+		columns[base+7], // Oerrs == Errout
+		columns[base+8], // Obytes == BytesSent
+	}
+	// The Drop column is only present on 12-column lines.
+	if len(columns) == 12 {
+		vv = append(vv, columns[base+10])
+	}
+
+	for _, target := range vv {
+		// "-" means the counter is unavailable for this row.
+		if target == "-" {
+			parsed = append(parsed, 0)
+			continue
+		}
+
+		if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil {
+			return
+		}
+		parsed = append(parsed, numericValue)
+	}
+
+	stat = &IOCountersStat{
+		Name:        strings.Trim(columns[0], "*"), // remove the * that sometimes is on right on interface
+		PacketsRecv: parsed[0],
+		Errin:       parsed[1],
+		BytesRecv:   parsed[2],
+		PacketsSent: parsed[3],
+		Errout:      parsed[4],
+		BytesSent:   parsed[5],
+	}
+	if len(parsed) == 7 {
+		stat.Dropout = parsed[6]
+	}
+	return
+}
+
+// netstatInterface pairs one interface's counters with its optional link ID.
+type netstatInterface struct {
+	linkID *uint
+	stat   *IOCountersStat
+}
+
+// parseNetstatOutput parses full `netstat -ibdnW` output (header line plus
+// one line per interface row) into netstatInterface entries.
+func parseNetstatOutput(output string) ([]netstatInterface, error) {
+	var (
+		err   error
+		lines = strings.Split(strings.Trim(output, endOfLine), endOfLine)
+	)
+
+	// number of interfaces is number of lines less one for the header
+	numberInterfaces := len(lines) - 1
+
+	interfaces := make([]netstatInterface, numberInterfaces)
+	// no output beside header
+	if numberInterfaces == 0 {
+		return interfaces, nil
+	}
+
+	// lines[0] is the header; parse the rest.
+	for index := 0; index < numberInterfaces; index++ {
+		nsIface := netstatInterface{}
+		if nsIface.stat, nsIface.linkID, err = parseNetstatLine(lines[index+1]); err != nil {
+			return nil, err
+		}
+		interfaces[index] = nsIface
+	}
+	return interfaces, nil
+}
+
+// mapInterfaceNameUsage maps an interface name to the number of netstat rows
+// (with a link ID) that carried it. A count > 1 means netstat truncated
+// distinct interface names to the same string.
+type mapInterfaceNameUsage map[string]uint
+
+// newMapInterfaceNameUsage counts, per interface name, how many parsed rows
+// with a non-nil link ID used that name.
+func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage {
+	output := make(mapInterfaceNameUsage)
+	for index := range ifaces {
+		if ifaces[index].linkID != nil {
+			ifaceName := ifaces[index].stat.Name
+			usage, ok := output[ifaceName]
+			if ok {
+				output[ifaceName] = usage + 1
+			} else {
+				output[ifaceName] = 1
+			}
+		}
+	}
+	return output
+}
+
+// isTruncated reports whether any name was used by more than one interface.
+func (min mapInterfaceNameUsage) isTruncated() bool {
+	for _, usage := range min {
+		if usage > 1 {
+			return true
+		}
+	}
+	return false
+}
+
+// notTruncated returns the names that belong to exactly one interface.
+func (min mapInterfaceNameUsage) notTruncated() []string {
+	output := make([]string, 0)
+	for ifaceName, usage := range min {
+		if usage == 1 {
+			output = append(output, ifaceName)
+		}
+	}
+	return output
+}
+
+// example of `netstat -ibdnW` output on yosemite
+// Name  Mtu   Network       Address            Ipkts Ierrs     Ibytes    Opkts Oerrs     Obytes  Coll Drop
+// lo0   16384                                  869107     0  169411755   869107     0  169411755     0   0
+// lo0   16384 ::1/128     ::1                  869107     -  169411755   869107     -  169411755     -   -
+// lo0   16384 127           127.0.0.1          869107     -  169411755   869107     -  169411755     -   -
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+	return IOCountersWithContext(context.Background(), pernic)
+}
+
+// IOCountersWithContext gathers per-interface counters by running netstat.
+// If netstat truncated interface names (two interfaces collapsed to the same
+// name), it re-runs netstat per interface (-I<name>) to disambiguate.
+// When pernic is false a single aggregated "all" entry is returned instead.
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	var (
+		ret      []IOCountersStat
+		retIndex int
+	)
+
+	netstat, err := exec.LookPath("netstat")
+	if err != nil {
+		return nil, err
+	}
+
+	// try to get all interface metrics, and hope there won't be any truncated
+	out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW")
+	if err != nil {
+		return nil, err
+	}
+
+	nsInterfaces, err := parseNetstatOutput(string(out))
+	if err != nil {
+		return nil, err
+	}
+
+	ifaceUsage := newMapInterfaceNameUsage(nsInterfaces)
+	notTruncated := ifaceUsage.notTruncated()
+	ret = make([]IOCountersStat, len(notTruncated))
+
+	if !ifaceUsage.isTruncated() {
+		// no truncated interface name, return stats of all interfaces that
+		// have a link ID (the canonical row per interface)
+		for index := range nsInterfaces {
+			if nsInterfaces[index].linkID != nil {
+				ret[retIndex] = *nsInterfaces[index].stat
+				retIndex++
+			}
+		}
+	} else {
+		// duplicated interface, list all interfaces
+		if out, err = invoke.CommandWithContext(ctx, "ifconfig", "-l"); err != nil {
+			return nil, err
+		}
+		interfaceNames := strings.Fields(strings.TrimRight(string(out), endOfLine))
+
+		// for each of the interface name, run netstat if we don't have any stats yet
+		for _, interfaceName := range interfaceNames {
+			truncated := true
+			for index := range nsInterfaces {
+				if nsInterfaces[index].linkID != nil && nsInterfaces[index].stat.Name == interfaceName {
+					// handle the non truncated name to avoid execute netstat for them again
+					ret[retIndex] = *nsInterfaces[index].stat
+					retIndex++
+					truncated = false
+					break
+				}
+			}
+			if truncated {
+				// run netstat with -I$ifacename
+				if out, err = invoke.CommandWithContext(ctx, netstat, "-ibdnWI"+interfaceName); err != nil {
+					return nil, err
+				}
+				parsedIfaces, err := parseNetstatOutput(string(out))
+				if err != nil {
+					return nil, err
+				}
+				if len(parsedIfaces) == 0 {
+					// interface had been removed since `ifconfig -l` had been executed
+					continue
+				}
+				// keep only the first row that carries a link ID
+				for index := range parsedIfaces {
+					if parsedIfaces[index].linkID != nil {
+						ret = append(ret, *parsedIfaces[index].stat)
+						break
+					}
+				}
+			}
+		}
+	}
+
+	if pernic == false {
+		return getIOCountersAll(ret)
+	}
+	return ret, nil
+}
+
+// IOCountersByFile exists just for compatibility with Linux.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+// IOCountersByFileWithContext ignores filename on Darwin and defers to IOCounters.
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCounters(pernic)
+}
+
+// FilterCounters is not implemented on Darwin.
+func FilterCounters() ([]FilterStat, error) {
+	return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// ConntrackStats is not implemented on Darwin.
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+	return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// NetProtoCounters returns network statistics for the entire system
+// If protocols is empty then all protocols are returned, otherwise
+// just the protocols in the list are returned.
+// Not Implemented for Darwin
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+	return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go
new file mode 100644
index 000000000..58325f655
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go
@@ -0,0 +1,93 @@
+//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows
+// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows
+
+package net
+
+import (
+ "context"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// This file provides not-implemented stubs for platforms without a native
+// backend (see the build constraints above); every entry point returns
+// common.ErrNotImplementedError.
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+	return IOCountersWithContext(context.Background(), pernic)
+}
+
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	return []IOCountersStat{}, common.ErrNotImplementedError
+}
+
+func FilterCounters() ([]FilterStat, error) {
+	return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+	return []FilterStat{}, common.ErrNotImplementedError
+}
+
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+	return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+	return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+	return []ProtoCountersStat{}, common.ErrNotImplementedError
+}
+
+func Connections(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithContext(context.Background(), kind, max)
+}
+
+func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+// Return a list of network connections opened, omitting `Uids`.
+// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be
+// removed from the API in the future.
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithoutUidsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0)
+}
+
+func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max)
+}
+
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0)
+}
+
+func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max)
+}
+
+func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return []ConnectionStat{}, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
new file mode 100644
index 000000000..7f31851ea
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
@@ -0,0 +1,128 @@
+//go:build freebsd
+// +build freebsd
+
+package net
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// IOCounters returns per-interface network I/O statistics. If pernic is
+// false, a single aggregated entry is returned instead.
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+	return IOCountersWithContext(context.Background(), pernic)
+}
+
+// IOCountersWithContext shells out to `netstat -ibdnW` and parses one
+// IOCountersStat per interface. netstat prints one row per address, so
+// only the first row seen for each interface name is kept.
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	out, err := invoke.CommandWithContext(ctx, "netstat", "-ibdnW")
+	if err != nil {
+		return nil, err
+	}
+
+	lines := strings.Split(string(out), "\n")
+	ret := make([]IOCountersStat, 0, len(lines)-1)
+	// Interface names already processed; sized for the worst case of one
+	// distinct interface per line (previously sized from len(ret), which
+	// is always 0 at this point).
+	exists := make([]string, 0, len(lines)-1)
+
+	for _, line := range lines {
+		values := strings.Fields(line)
+		if len(values) < 1 || values[0] == "Name" {
+			continue
+		}
+		if common.StringsHas(exists, values[0]) {
+			// skip if already get
+			continue
+		}
+		exists = append(exists, values[0])
+
+		if len(values) < 12 {
+			continue
+		}
+		base := 1
+		// sometimes Address is omitted
+		if len(values) < 13 {
+			base = 0
+		}
+
+		parsed := make([]uint64, 0, 8)
+		vv := []string{
+			values[base+3],  // PacketsRecv
+			values[base+4],  // Errin
+			values[base+5],  // Dropin
+			values[base+6],  // BytesRecv
+			values[base+7],  // PacketsSent
+			values[base+8],  // Errout
+			values[base+9],  // BytesSent
+			values[base+11], // Dropout
+		}
+		for _, target := range vv {
+			// netstat prints "-" for counters it cannot report; treat as 0.
+			if target == "-" {
+				parsed = append(parsed, 0)
+				continue
+			}
+
+			t, err := strconv.ParseUint(target, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			parsed = append(parsed, t)
+		}
+
+		n := IOCountersStat{
+			Name:        values[0],
+			PacketsRecv: parsed[0],
+			Errin:       parsed[1],
+			Dropin:      parsed[2],
+			BytesRecv:   parsed[3],
+			PacketsSent: parsed[4],
+			Errout:      parsed[5],
+			BytesSent:   parsed[6],
+			Dropout:     parsed[7],
+		}
+		ret = append(ret, n)
+	}
+
+	if !pernic {
+		return getIOCountersAll(ret)
+	}
+
+	return ret, nil
+}
+
+// IOCountersByFile exists just for compatibility with Linux.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+// IOCountersByFileWithContext ignores filename (FreeBSD has no /proc/net/dev
+// equivalent) and delegates to the netstat-based implementation. The caller's
+// context is propagated so cancellation reaches the spawned netstat process
+// (previously the context was silently dropped via IOCounters).
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCountersWithContext(ctx, pernic)
+}
+
+// FilterCounters is not implemented on FreeBSD.
+func FilterCounters() ([]FilterStat, error) {
+	return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// ConntrackStats is not implemented on FreeBSD.
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+	return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// NetProtoCounters returns network statistics for the entire system
+// If protocols is empty then all protocols are returned, otherwise
+// just the protocols in the list are returned.
+// Not Implemented for FreeBSD
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+	return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
new file mode 100644
index 000000000..c08997196
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
@@ -0,0 +1,911 @@
+//go:build linux
+// +build linux
+
+package net
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// Column indexes of a per-CPU row in /proc/net/stat/nf_conntrack, consumed
+// by conntrackStatsFromFile below. NOTE(review): CT_EXPEctNEW and
+// CT_EXPEctDELETE keep upstream's inconsistent casing; renaming them would
+// touch every use site in this vendored file.
+const ( // Conntrack Column numbers
+	ctENTRIES = iota
+	ctSEARCHED
+	ctFOUND
+	ctNEW
+	ctINVALID
+	ctIGNORE
+	ctDELETE
+	ctDELETE_LIST
+	ctINSERT
+	ctINSERT_FAILED
+	ctDROP
+	ctEARLY_DROP
+	ctICMP_ERROR
+	CT_EXPEctNEW
+	ctEXPECT_CREATE
+	CT_EXPEctDELETE
+	ctSEARCH_RESTART
+)
+
+// NetIOCounters returns network I/O statistics for every network
+// interface installed on the system. If pernic argument is false,
+// return only sum of all information (which name is 'all'). If true,
+// every network interface installed on the system is returned
+// separately.
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+	return IOCountersWithContext(context.Background(), pernic)
+}
+
+// IOCountersWithContext reads the counters from /proc/net/dev (honouring
+// the HOST_PROC override via common.HostProc).
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+	filename := common.HostProc("net/dev")
+	return IOCountersByFileWithContext(ctx, pernic, filename)
+}
+
+// IOCountersByFile parses counters from an arbitrary /proc/net/dev-format file.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+	return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+// IOCountersByFileWithContext parses a /proc/net/dev-format file into one
+// IOCountersStat per interface. The first two lines of the file are column
+// headers and are skipped. Each data line looks like
+//   "eth0: 1234 10 0 0 0 0 0 0 5678 20 0 0 0 0 0 0"
+// with receive counters in fields 0-7 and transmit counters in fields 8-15.
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+	lines, err := common.ReadLines(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	// parts[0] = interface name, parts[1] = counter fields; reused per line.
+	parts := make([]string, 2)
+
+	// capacity hint only; the two header lines make this a slight over-estimate
+	statlen := len(lines) - 1
+
+	ret := make([]IOCountersStat, 0, statlen)
+
+	for _, line := range lines[2:] {
+		// the interface name may itself contain ':' (e.g. aliases), so split
+		// on the LAST ':' in the line
+		separatorPos := strings.LastIndex(line, ":")
+		if separatorPos == -1 {
+			continue
+		}
+		parts[0] = line[0:separatorPos]
+		parts[1] = line[separatorPos+1:]
+
+		interfaceName := strings.TrimSpace(parts[0])
+		if interfaceName == "" {
+			continue
+		}
+
+		// NOTE(review): fields is indexed up to [12] without a length check;
+		// a malformed line with fewer columns would panic. Upstream relies on
+		// the kernel's fixed /proc/net/dev layout here.
+		fields := strings.Fields(strings.TrimSpace(parts[1]))
+		bytesRecv, err := strconv.ParseUint(fields[0], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		packetsRecv, err := strconv.ParseUint(fields[1], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		errIn, err := strconv.ParseUint(fields[2], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		dropIn, err := strconv.ParseUint(fields[3], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		fifoIn, err := strconv.ParseUint(fields[4], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		bytesSent, err := strconv.ParseUint(fields[8], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		packetsSent, err := strconv.ParseUint(fields[9], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		errOut, err := strconv.ParseUint(fields[10], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		dropOut, err := strconv.ParseUint(fields[11], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+		fifoOut, err := strconv.ParseUint(fields[12], 10, 64)
+		if err != nil {
+			return ret, err
+		}
+
+		nic := IOCountersStat{
+			Name:        interfaceName,
+			BytesRecv:   bytesRecv,
+			PacketsRecv: packetsRecv,
+			Errin:       errIn,
+			Dropin:      dropIn,
+			Fifoin:      fifoIn,
+			BytesSent:   bytesSent,
+			PacketsSent: packetsSent,
+			Errout:      errOut,
+			Dropout:     dropOut,
+			Fifoout:     fifoOut,
+		}
+		ret = append(ret, nic)
+	}
+
+	if !pernic {
+		return getIOCountersAll(ret)
+	}
+
+	return ret, nil
+}
+
+// netProtocols lists the protocol sections of /proc/net/snmp that
+// ProtoCounters knows how to parse; used when the caller passes no filter.
+var netProtocols = []string{
+	"ip",
+	"icmp",
+	"icmpmsg",
+	"tcp",
+	"udp",
+	"udplite",
+}
+
+// NetProtoCounters returns network statistics for the entire system
+// If protocols is empty then all protocols are returned, otherwise
+// just the protocols in the list are returned.
+// Available protocols:
+// ip,icmp,icmpmsg,tcp,udp,udplite
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+	return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+// ProtoCountersWithContext parses /proc/net/snmp, where each protocol
+// contributes two consecutive lines: a header line of stat names and a data
+// line of values, both prefixed with "Proto: ".
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+	if len(protocols) == 0 {
+		protocols = netProtocols
+	}
+
+	stats := make([]ProtoCountersStat, 0, len(protocols))
+	protos := make(map[string]bool, len(protocols))
+	for _, p := range protocols {
+		protos[p] = true
+	}
+
+	filename := common.HostProc("net/snmp")
+	lines, err := common.ReadLines(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	linecount := len(lines)
+	for i := 0; i < linecount; i++ {
+		line := lines[i]
+		r := strings.IndexRune(line, ':')
+		if r == -1 {
+			return nil, errors.New(filename + " is not formatted correctly, expected ':'.")
+		}
+		proto := strings.ToLower(line[:r])
+		if !protos[proto] {
+			// skip protocol and data line
+			i++
+			continue
+		}
+
+		// Read header line (r+2 skips the ": " after the protocol name)
+		statNames := strings.Split(line[r+2:], " ")
+
+		// Read data line
+		i++
+		statValues := strings.Split(lines[i][r+2:], " ")
+		if len(statNames) != len(statValues) {
+			return nil, errors.New(filename + " is not formatted correctly, expected same number of columns.")
+		}
+		stat := ProtoCountersStat{
+			Protocol: proto,
+			Stats:    make(map[string]int64, len(statNames)),
+		}
+		for j := range statNames {
+			value, err := strconv.ParseInt(statValues[j], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			stat.Stats[statNames[j]] = value
+		}
+		stats = append(stats, stat)
+	}
+	return stats, nil
+}
+
+// NetFilterCounters returns iptables conntrack statistics
+// the currently in use conntrack count and the max.
+// If the file does not exist or is invalid it will return nil.
+func FilterCounters() ([]FilterStat, error) {
+	return FilterCountersWithContext(context.Background())
+}
+
+// FilterCountersWithContext reads the current and maximum conntrack entry
+// counts from /proc/sys/net/netfilter and returns them as a one-element slice.
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+	countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count")
+	maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max")
+
+	count, err := common.ReadInts(countfile)
+	if err != nil {
+		return nil, err
+	}
+	stats := make([]FilterStat, 0, 1)
+
+	max, err := common.ReadInts(maxfile)
+	if err != nil {
+		return nil, err
+	}
+
+	// each file contains a single integer; take the first value
+	payload := FilterStat{
+		ConnTrackCount: count[0],
+		ConnTrackMax:   max[0],
+	}
+
+	stats = append(stats, payload)
+	return stats, nil
+}
+
+// ConntrackStats returns more detailed info about the conntrack table
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+	return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+// ConntrackStatsWithContext returns more detailed info about the conntrack table
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+	return conntrackStatsFromFile(common.HostProc("net/stat/nf_conntrack"), percpu)
+}
+
+// conntrackStatsFromFile returns more detailed info about the conntrack table
+// from `filename`
+// If 'percpu' is false, the result will contain exactly one item with totals/summary
+func conntrackStatsFromFile(filename string, percpu bool) ([]ConntrackStat, error) {
+	lines, err := common.ReadLines(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	statlist := NewConntrackStatList()
+
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		// one row per CPU, 17 hex columns; the header row starts with "entries"
+		if len(fields) == 17 && fields[0] != "entries" {
+			statlist.Append(NewConntrackStat(
+				common.HexToUint32(fields[ctENTRIES]),
+				common.HexToUint32(fields[ctSEARCHED]),
+				common.HexToUint32(fields[ctFOUND]),
+				common.HexToUint32(fields[ctNEW]),
+				common.HexToUint32(fields[ctINVALID]),
+				common.HexToUint32(fields[ctIGNORE]),
+				common.HexToUint32(fields[ctDELETE]),
+				common.HexToUint32(fields[ctDELETE_LIST]),
+				common.HexToUint32(fields[ctINSERT]),
+				common.HexToUint32(fields[ctINSERT_FAILED]),
+				common.HexToUint32(fields[ctDROP]),
+				common.HexToUint32(fields[ctEARLY_DROP]),
+				common.HexToUint32(fields[ctICMP_ERROR]),
+				common.HexToUint32(fields[CT_EXPEctNEW]),
+				common.HexToUint32(fields[ctEXPECT_CREATE]),
+				common.HexToUint32(fields[CT_EXPEctDELETE]),
+				common.HexToUint32(fields[ctSEARCH_RESTART]),
+			))
+		}
+	}
+
+	if percpu {
+		return statlist.Items(), nil
+	}
+	return statlist.Summary(), nil
+}
+
+// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
+// tcpStatuses maps the hex state column of /proc/net/tcp{,6} to a name.
+var tcpStatuses = map[string]string{
+	"01": "ESTABLISHED",
+	"02": "SYN_SENT",
+	"03": "SYN_RECV",
+	"04": "FIN_WAIT1",
+	"05": "FIN_WAIT2",
+	"06": "TIME_WAIT",
+	"07": "CLOSE",
+	"08": "CLOSE_WAIT",
+	"09": "LAST_ACK",
+	"0A": "LISTEN",
+	"0B": "CLOSING",
+}
+
+// netConnectionKindType describes one /proc/net/<filename> table: its
+// address family, socket type and the file it is read from.
+type netConnectionKindType struct {
+	family   uint32
+	sockType uint32
+	filename string
+}
+
+var kindTCP4 = netConnectionKindType{
+	family:   syscall.AF_INET,
+	sockType: syscall.SOCK_STREAM,
+	filename: "tcp",
+}
+
+var kindTCP6 = netConnectionKindType{
+	family:   syscall.AF_INET6,
+	sockType: syscall.SOCK_STREAM,
+	filename: "tcp6",
+}
+
+var kindUDP4 = netConnectionKindType{
+	family:   syscall.AF_INET,
+	sockType: syscall.SOCK_DGRAM,
+	filename: "udp",
+}
+
+var kindUDP6 = netConnectionKindType{
+	family:   syscall.AF_INET6,
+	sockType: syscall.SOCK_DGRAM,
+	filename: "udp6",
+}
+
+var kindUNIX = netConnectionKindType{
+	family:   syscall.AF_UNIX,
+	filename: "unix",
+}
+
+// netConnectionKindMap translates the user-facing `kind` string into the
+// set of proc tables to scan.
+var netConnectionKindMap = map[string][]netConnectionKindType{
+	"all":   {kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX},
+	"tcp":   {kindTCP4, kindTCP6},
+	"tcp4":  {kindTCP4},
+	"tcp6":  {kindTCP6},
+	"udp":   {kindUDP4, kindUDP6},
+	"udp4":  {kindUDP4},
+	"udp6":  {kindUDP6},
+	"unix":  {kindUNIX},
+	"inet":  {kindTCP4, kindTCP6, kindUDP4, kindUDP6},
+	"inet4": {kindTCP4, kindUDP4},
+	"inet6": {kindTCP6, kindUDP6},
+}
+
+// inodeMap ties a socket inode back to the process and file descriptor
+// that holds it open.
+type inodeMap struct {
+	pid int32
+	fd  uint32
+}
+
+// connTmp is the intermediate parse result for a single proc-table row,
+// later converted into a public ConnectionStat.
+type connTmp struct {
+	fd       uint32
+	family   uint32
+	sockType uint32
+	laddr    Addr
+	raddr    Addr
+	status   string
+	pid      int32
+	boundPid int32
+	path     string
+}
+
+// Return a list of network connections opened.
+func Connections(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	return ConnectionsPidWithContext(ctx, kind, 0)
+}
+
+// Return a list of network connections opened returning at most `max`
+// connections for each running process.
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithContext(context.Background(), kind, max)
+}
+
+func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithContext(ctx, kind, 0, max)
+}
+
+// Return a list of network connections opened, omitting `Uids`.
+// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be
+// removed from the API in the future.
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) {
+	return ConnectionsWithoutUidsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+	return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0)
+}
+
+func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max)
+}
+
+// Return a list of network connections opened by a process.
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithContext(ctx, kind, pid, 0)
+}
+
+func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0)
+}
+
+// Return up to `max` network connections opened by a process.
+func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max)
+}
+
+// All public variants funnel into connectionsPidMaxWithoutUidsWithContext;
+// the final bool selects whether per-process UIDs are skipped.
+func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false)
+}
+
+func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+	return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true)
+}
+
+// connectionsPidMaxWithoutUidsWithContext is the single implementation behind
+// every Connections* entry point: it resolves socket inodes (for one pid, or
+// for all pids when pid == 0), then matches them against the proc net tables
+// selected by `kind`. `max` limits the fds scanned per process (0 = no limit).
+func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) {
+	tmap, ok := netConnectionKindMap[kind]
+	if !ok {
+		return nil, fmt.Errorf("invalid kind, %s", kind)
+	}
+	root := common.HostProc()
+	var err error
+	var inodes map[string][]inodeMap
+	if pid == 0 {
+		inodes, err = getProcInodesAllWithContext(ctx, root, max)
+	} else {
+		inodes, err = getProcInodes(root, pid, max)
+		if len(inodes) == 0 {
+			// no connection for the pid
+			// NOTE(review): this early return also swallows any error from
+			// getProcInodes for a single pid — presumably intentional
+			// ("process has no sockets"), kept as-is.
+			return []ConnectionStat{}, nil
+		}
+	}
+	if err != nil {
+		return nil, fmt.Errorf("could not get pid(s), %d: %w", pid, err)
+	}
+	return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids)
+}
+
+func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) {
+	return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids)
+}
+
+// statsFromInodesWithContext scans each requested proc net table and converts
+// matching rows into ConnectionStats, de-duplicating identical connections
+// and (unless skipUids is set) attaching the owning process's UIDs.
+func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) {
+	dupCheckMap := make(map[string]struct{})
+	var ret []ConnectionStat
+
+	var err error
+	for _, t := range tmap {
+		var path string
+		var connKey string
+		var ls []connTmp
+		// pid 0 means "all processes": read the system-wide table instead of
+		// the per-process one
+		if pid == 0 {
+			path = fmt.Sprintf("%s/net/%s", root, t.filename)
+		} else {
+			path = fmt.Sprintf("%s/%d/net/%s", root, pid, t.filename)
+		}
+		switch t.family {
+		case syscall.AF_INET, syscall.AF_INET6:
+			ls, err = processInetWithContext(ctx, path, t, inodes, pid)
+		case syscall.AF_UNIX:
+			ls, err = processUnix(path, t, inodes, pid)
+		}
+		if err != nil {
+			return nil, err
+		}
+		for _, c := range ls {
+			// Build TCP key to id the connection uniquely
+			// socket type, src ip, src port, dst ip, dst port and state should be enough
+			// to prevent duplications.
+			connKey = fmt.Sprintf("%d-%s:%d-%s:%d-%s", c.sockType, c.laddr.IP, c.laddr.Port, c.raddr.IP, c.raddr.Port, c.status)
+			if _, ok := dupCheckMap[connKey]; ok {
+				continue
+			}
+
+			conn := ConnectionStat{
+				Fd:     c.fd,
+				Family: c.family,
+				Type:   c.sockType,
+				Laddr:  c.laddr,
+				Raddr:  c.raddr,
+				Status: c.status,
+				Pid:    c.pid,
+			}
+			// rows not attributable to a scanned process report pid 0;
+			// fall back to the bound pid discovered via the inode map
+			// (previously an if/else redundantly re-assigned c.pid here)
+			if c.pid == 0 {
+				conn.Pid = c.boundPid
+			}
+
+			if !skipUids {
+				// fetch process owner Real, effective, saved set, and filesystem UIDs
+				proc := process{Pid: conn.Pid}
+				conn.Uids, _ = proc.getUids()
+			}
+
+			ret = append(ret, conn)
+			dupCheckMap[connKey] = struct{}{}
+		}
+
+	}
+
+	return ret, nil
+}
+
+// getProcInodes returns fd of the pid.
+// It maps each socket inode found under /proc/<pid>/fd to the (pid, fd)
+// pairs holding it; `max` limits how many directory entries are read
+// (0 reads them all).
+func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) {
+	ret := make(map[string][]inodeMap)
+
+	dir := fmt.Sprintf("%s/%d/fd", root, pid)
+	f, err := os.Open(dir)
+	if err != nil {
+		return ret, err
+	}
+	defer f.Close()
+	dirEntries, err := readDir(f, max)
+	if err != nil {
+		return ret, err
+	}
+	for _, dirEntry := range dirEntries {
+		inodePath := fmt.Sprintf("%s/%d/fd/%s", root, pid, dirEntry.Name())
+
+		inode, err := os.Readlink(inodePath)
+		if err != nil {
+			// fd may have been closed since the directory listing
+			continue
+		}
+		if !strings.HasPrefix(inode, "socket:[") {
+			continue
+		}
+		// the process is using a socket
+		// strip "socket:[" prefix and trailing "]" to get the bare inode
+		l := len(inode)
+		inode = inode[8 : l-1]
+		_, ok := ret[inode]
+		if !ok {
+			ret[inode] = make([]inodeMap, 0)
+		}
+		fd, err := strconv.Atoi(dirEntry.Name())
+		if err != nil {
+			continue
+		}
+
+		i := inodeMap{
+			pid: pid,
+			fd:  uint32(fd),
+		}
+		ret[inode] = append(ret[inode], i)
+	}
+	return ret, nil
+}
+
+// Pids returns all pids visible under the proc root.
+// Note: this is a copy of process_linux.Pids()
+// FIXME: Import process occures import cycle.
+// move to common made other platform breaking. Need consider.
+func Pids() ([]int32, error) {
+	return PidsWithContext(context.Background())
+}
+
+// PidsWithContext lists /proc and returns every purely-numeric entry name
+// as a pid.
+func PidsWithContext(ctx context.Context) ([]int32, error) {
+	var ret []int32
+
+	d, err := os.Open(common.HostProc())
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	fnames, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	for _, fname := range fnames {
+		pid, err := strconv.ParseInt(fname, 10, 32)
+		if err != nil {
+			// if not numeric name, just skip
+			continue
+		}
+		ret = append(ret, int32(pid))
+	}
+
+	return ret, nil
+}
+
+// Note: the following is based off process_linux structs and methods
+// we need these to fetch the owner of a process ID
+// FIXME: Import process occures import cycle.
+// see remarks on pids()
+type process struct {
+	Pid  int32 `json:"pid"`
+	uids []int32
+}
+
+// Uids returns user ids of the process as a slice of the int
+func (p *process) getUids() ([]int32, error) {
+	err := p.fillFromStatus()
+	if err != nil {
+		return []int32{}, err
+	}
+	return p.uids, nil
+}
+
+// Get status from /proc/(pid)/status
+// Only the "Uid:" line (real, effective, saved-set, filesystem) is parsed;
+// every other line is ignored.
+func (p *process) fillFromStatus() error {
+	pid := p.Pid
+	statPath := common.HostProc(strconv.Itoa(int(pid)), "status")
+	contents, err := ioutil.ReadFile(statPath)
+	if err != nil {
+		return err
+	}
+	lines := strings.Split(string(contents), "\n")
+	for _, line := range lines {
+		tabParts := strings.SplitN(line, "\t", 2)
+		if len(tabParts) < 2 {
+			continue
+		}
+		value := tabParts[1]
+		switch strings.TrimRight(tabParts[0], ":") {
+		case "Uid":
+			p.uids = make([]int32, 0, 4)
+			for _, i := range strings.Split(value, "\t") {
+				v, err := strconv.ParseInt(i, 10, 32)
+				if err != nil {
+					return err
+				}
+				p.uids = append(p.uids, int32(v))
+			}
+		}
+	}
+	return nil
+}
+
+func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) {
+	return getProcInodesAllWithContext(context.Background(), root, max)
+}
+
+// getProcInodesAllWithContext aggregates the socket-inode maps of every
+// visible pid into one map; per-process failures caused by races
+// (process exit, permission) are tolerated.
+func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) {
+	pids, err := PidsWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ret := make(map[string][]inodeMap)
+
+	for _, pid := range pids {
+		t, err := getProcInodes(root, pid, max)
+		if err != nil {
+			// skip if permission error or no longer exists
+			if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) {
+				continue
+			}
+			return ret, err
+		}
+		if len(t) == 0 {
+			continue
+		}
+		// merge this pid's inode map into the aggregate
+		ret = updateMap(ret, t)
+	}
+	return ret, nil
+}
+
+// decodeAddress decode addresse represents addr in proc/net/*
+// ex:
+// "0500000A:0016" -> "10.0.0.5", 22
+// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53
+func decodeAddress(family uint32, src string) (Addr, error) {
+	return decodeAddressWithContext(context.Background(), family, src)
+}
+
+// decodeAddressWithContext splits "HEXADDR:HEXPORT"; the port is hex, and the
+// address bytes are stored in host byte order, so on little-endian machines
+// the IPv4 bytes must be reversed before printing.
+func decodeAddressWithContext(ctx context.Context, family uint32, src string) (Addr, error) {
+	t := strings.Split(src, ":")
+	if len(t) != 2 {
+		return Addr{}, fmt.Errorf("does not contain port, %s", src)
+	}
+	addr := t[0]
+	port, err := strconv.ParseUint(t[1], 16, 16)
+	if err != nil {
+		return Addr{}, fmt.Errorf("invalid port, %s", src)
+	}
+	decoded, err := hex.DecodeString(addr)
+	if err != nil {
+		return Addr{}, fmt.Errorf("decode error, %w", err)
+	}
+	var ip net.IP
+
+	if family == syscall.AF_INET {
+		if common.IsLittleEndian() {
+			ip = net.IP(ReverseWithContext(ctx, decoded))
+		} else {
+			ip = net.IP(decoded)
+		}
+	} else { // IPv6
+		ip, err = parseIPv6HexStringWithContext(ctx, decoded)
+		if err != nil {
+			return Addr{}, err
+		}
+	}
+	return Addr{
+		IP:   ip.String(),
+		Port: uint32(port),
+	}, nil
+}
+
+// Reverse reverses array of bytes.
+// Note: the slice is reversed IN PLACE; the returned slice aliases s.
+func Reverse(s []byte) []byte {
+	return ReverseWithContext(context.Background(), s)
+}
+
+func ReverseWithContext(ctx context.Context, s []byte) []byte {
+	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+		s[i], s[j] = s[j], s[i]
+	}
+	return s
+}
+
+// parseIPv6HexString parse array of bytes to IPv6 string
+func parseIPv6HexString(src []byte) (net.IP, error) {
+	return parseIPv6HexStringWithContext(context.Background(), src)
+}
+
+// parseIPv6HexStringWithContext reverses each 4-byte word of the 16-byte
+// address (kernel stores them as host-order 32-bit words). Note that
+// ReverseWithContext mutates src's words in place while building the result.
+func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) {
+	if len(src) != 16 {
+		return nil, fmt.Errorf("invalid IPv6 string")
+	}
+
+	buf := make([]byte, 0, 16)
+	for i := 0; i < len(src); i += 4 {
+		r := ReverseWithContext(ctx, src[i:i+4])
+		buf = append(buf, r...)
+	}
+	return net.IP(buf), nil
+}
+
+func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
+	return processInetWithContext(context.Background(), file, kind, inodes, filterPid)
+}
+
+// processInetWithContext parses one /proc/net/{tcp,tcp6,udp,udp6} table into
+// connTmp rows, resolving each row's inode to a (pid, fd) via `inodes` and
+// optionally filtering to a single pid.
+func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
+	if strings.HasSuffix(file, "6") && !common.PathExists(file) {
+		// IPv6 not supported, return empty.
+		return []connTmp{}, nil
+	}
+
+	// Read the contents of the /proc file with a single read sys call.
+	// This minimizes duplicates in the returned connections
+	// For more info:
+	// https://github.com/shirou/gopsutil/pull/361
+	contents, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	lines := bytes.Split(contents, []byte("\n"))
+
+	var ret []connTmp
+	// skip first line
+	for _, line := range lines[1:] {
+		l := strings.Fields(string(line))
+		if len(l) < 10 {
+			continue
+		}
+		laddr := l[1]
+		raddr := l[2]
+		status := l[3]
+		inode := l[9]
+		pid := int32(0)
+		fd := uint32(0)
+		i, exists := inodes[inode]
+		if exists {
+			// attribute the row to the first process found holding the inode
+			pid = i[0].pid
+			fd = i[0].fd
+		}
+		if filterPid > 0 && filterPid != pid {
+			continue
+		}
+		// only TCP sockets carry a meaningful state column
+		if kind.sockType == syscall.SOCK_STREAM {
+			status = tcpStatuses[status]
+		} else {
+			status = "NONE"
+		}
+		la, err := decodeAddressWithContext(ctx, kind.family, laddr)
+		if err != nil {
+			continue
+		}
+		ra, err := decodeAddressWithContext(ctx, kind.family, raddr)
+		if err != nil {
+			continue
+		}
+
+		ret = append(ret, connTmp{
+			fd:       fd,
+			family:   kind.family,
+			sockType: kind.sockType,
+			laddr:    la,
+			raddr:    ra,
+			status:   status,
+			pid:      pid,
+		})
+	}
+
+	return ret, nil
+}
+
+// processUnix parses /proc/net/unix into connTmp rows. A data line has the
+// form "Num RefCount Protocol Flags Type St Inode [Path]": 7 fields without
+// a path, 8 with one.
+func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
+	// Read the contents of the /proc file with a single read sys call.
+	// This minimizes duplicates in the returned connections
+	// For more info:
+	// https://github.com/shirou/gopsutil/pull/361
+	contents, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	lines := bytes.Split(contents, []byte("\n"))
+
+	var ret []connTmp
+	// skip first line
+	for _, line := range lines[1:] {
+		tokens := strings.Fields(string(line))
+		// tokens[6] (the inode) is read below, so a line needs at least 7
+		// fields; the previous `< 6` guard let a 6-field line panic with
+		// an index-out-of-range.
+		if len(tokens) < 7 {
+			continue
+		}
+		st, err := strconv.Atoi(tokens[4])
+		if err != nil {
+			return nil, err
+		}
+
+		inode := tokens[6]
+
+		var pairs []inodeMap
+		pairs, exists := inodes[inode]
+		if !exists {
+			// unknown owner: emit one row with zero pid/fd
+			pairs = []inodeMap{
+				{},
+			}
+		}
+		for _, pair := range pairs {
+			if filterPid > 0 && filterPid != pair.pid {
+				continue
+			}
+			var path string
+			if len(tokens) == 8 {
+				path = tokens[len(tokens)-1]
+			}
+			ret = append(ret, connTmp{
+				fd:       pair.fd,
+				family:   kind.family,
+				sockType: uint32(st),
+				laddr: Addr{
+					IP: path,
+				},
+				pid:    pair.pid,
+				status: "NONE",
+				path:   path,
+			})
+		}
+	}
+
+	return ret, nil
+}
+
+// updateMap merges `add` into `src`, appending pairs for inodes already
+// present. src is mutated and returned for convenience.
+func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap {
+	for key, value := range add {
+		a, exists := src[key]
+		if !exists {
+			src[key] = value
+			continue
+		}
+		src[key] = append(a, value...)
+	}
+	return src
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
new file mode 100644
index 000000000..bd5c95871
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
@@ -0,0 +1,12 @@
+//go:build !go1.16
+// +build !go1.16
+
+package net
+
+import (
+ "os"
+)
+
+// readDir is the pre-Go-1.16 shim used by getProcInodes; it reads up to
+// `max` entries (0 or negative reads all) via the legacy Readdir API.
+func readDir(f *os.File, max int) ([]os.FileInfo, error) {
+	return f.Readdir(max)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
new file mode 100644
index 000000000..a45072e92
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
@@ -0,0 +1,12 @@
+//go:build go1.16
+// +build go1.16
+
+package net
+
+import (
+ "os"
+)
+
+// readDir is the Go 1.16+ shim used by getProcInodes; ReadDir avoids the
+// per-entry lstat that Readdir performs.
+func readDir(f *os.File, max int) ([]os.DirEntry, error) {
+	return f.ReadDir(max)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
new file mode 100644
index 000000000..5f066a09f
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
@@ -0,0 +1,319 @@
+//go:build openbsd
+// +build openbsd
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// portMatch splits a BSD netstat address of the form "host.port" at the
+// last dot, capturing the host and the numeric port.
+var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`)
+
+// ParseNetstat parses OpenBSD `netstat -inb` output (mode "inb": byte
+// counters) or `netstat -ind` output (mode "ind": packet/error/drop
+// counters) and merges the per-interface values into iocs, keyed by
+// interface name. Only the first line seen for each interface is used.
+func ParseNetstat(output string, mode string,
+	iocs map[string]IOCountersStat) error {
+	lines := strings.Split(output, "\n")
+
+	// Interfaces already processed; netstat repeats a NIC once per address.
+	exists := make([]string, 0, len(lines)-1)
+
+	columns := 6
+	if mode == "ind" {
+		columns = 10
+	}
+	for _, line := range lines {
+		values := strings.Fields(line)
+		if len(values) < 1 || values[0] == "Name" {
+			continue
+		}
+		if common.StringsHas(exists, values[0]) {
+			// skip if already get
+			continue
+		}
+
+		if len(values) < columns {
+			continue
+		}
+		base := 1
+		// sometimes Address is omitted
+		// NOTE(review): this branch is unreachable — the identical
+		// condition just above already `continue`d, so base is always 1.
+		if len(values) < columns {
+			base = 0
+		}
+
+		parsed := make([]uint64, 0, 8)
+		var vv []string
+		if mode == "inb" {
+			vv = []string{
+				values[base+3], // BytesRecv
+				values[base+4], // BytesSent
+			}
+		} else {
+			vv = []string{
+				values[base+3], // Ipkts
+				values[base+4], // Ierrs
+				values[base+5], // Opkts
+				values[base+6], // Oerrs
+				values[base+8], // Drops
+			}
+		}
+		for _, target := range vv {
+			if target == "-" {
+				// netstat prints "-" for unavailable counters; treat as 0.
+				parsed = append(parsed, 0)
+				continue
+			}
+
+			t, err := strconv.ParseUint(target, 10, 64)
+			if err != nil {
+				return err
+			}
+			parsed = append(parsed, t)
+		}
+		exists = append(exists, values[0])
+
+		n, present := iocs[values[0]]
+		if !present {
+			n = IOCountersStat{Name: values[0]}
+		}
+		if mode == "inb" {
+			n.BytesRecv = parsed[0]
+			n.BytesSent = parsed[1]
+		} else {
+			n.PacketsRecv = parsed[0]
+			n.Errin = parsed[1]
+			n.PacketsSent = parsed[2]
+			n.Errout = parsed[3]
+			n.Dropin = parsed[4]
+			// The netstat output here has a single Drops column, so the
+			// same value feeds both in- and out-drops.
+			n.Dropout = parsed[4]
+		}
+
+		iocs[n.Name] = n
+	}
+	return nil
+}
+
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+ return IOCountersWithContext(context.Background(), pernic)
+}
+
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+ netstat, err := exec.LookPath("netstat")
+ if err != nil {
+ return nil, err
+ }
+ out, err := invoke.CommandWithContext(ctx, netstat, "-inb")
+ if err != nil {
+ return nil, err
+ }
+ out2, err := invoke.CommandWithContext(ctx, netstat, "-ind")
+ if err != nil {
+ return nil, err
+ }
+ iocs := make(map[string]IOCountersStat)
+
+ lines := strings.Split(string(out), "\n")
+ ret := make([]IOCountersStat, 0, len(lines)-1)
+
+ err = ParseNetstat(string(out), "inb", iocs)
+ if err != nil {
+ return nil, err
+ }
+ err = ParseNetstat(string(out2), "ind", iocs)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, ioc := range iocs {
+ ret = append(ret, ioc)
+ }
+
+ if pernic == false {
+ return getIOCountersAll(ret)
+ }
+
+ return ret, nil
+}
+
+// IOCountersByFile exists just for compatibility with Linux.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+ return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+ return IOCounters(pernic)
+}
+
+func FilterCounters() ([]FilterStat, error) {
+ return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+ return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+// NetProtoCounters returns network statistics for the entire system
+// If protocols is empty then all protocols are returned, otherwise
+// just the protocols in the list are returned.
+// Not Implemented for OpenBSD
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+ return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func parseNetstatLine(line string) (ConnectionStat, error) {
+ f := strings.Fields(line)
+ if len(f) < 5 {
+ return ConnectionStat{}, fmt.Errorf("wrong line,%s", line)
+ }
+
+ var netType, netFamily uint32
+ switch f[0] {
+ case "tcp":
+ netType = syscall.SOCK_STREAM
+ netFamily = syscall.AF_INET
+ case "udp":
+ netType = syscall.SOCK_DGRAM
+ netFamily = syscall.AF_INET
+ case "tcp6":
+ netType = syscall.SOCK_STREAM
+ netFamily = syscall.AF_INET6
+ case "udp6":
+ netType = syscall.SOCK_DGRAM
+ netFamily = syscall.AF_INET6
+ default:
+ return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0])
+ }
+
+ laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily)
+ if err != nil {
+ return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4])
+ }
+
+ n := ConnectionStat{
+ Fd: uint32(0), // not supported
+ Family: uint32(netFamily),
+ Type: uint32(netType),
+ Laddr: laddr,
+ Raddr: raddr,
+ Pid: int32(0), // not supported
+ }
+ if len(f) == 6 {
+ n.Status = f[5]
+ }
+
+ return n, nil
+}
+
+// parseNetstatAddr parses the local and remote "host.port" columns of a
+// netstat line into Addr values. A host of "*" becomes the wildcard
+// address for the given family; a remote of "*.*" means no remote
+// endpoint, in which case raddr stays the zero Addr.
+// NOTE(review): the error from parse(local) is not checked before the
+// remote is parsed, so a later successful parse(remote) overwrites it.
+func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) {
+	parse := func(l string) (Addr, error) {
+		matches := portMatch.FindStringSubmatch(l)
+		if matches == nil {
+			return Addr{}, fmt.Errorf("wrong addr, %s", l)
+		}
+		host := matches[1]
+		port := matches[2]
+		if host == "*" {
+			switch family {
+			case syscall.AF_INET:
+				host = "0.0.0.0"
+			case syscall.AF_INET6:
+				host = "::"
+			default:
+				return Addr{}, fmt.Errorf("unknown family, %d", family)
+			}
+		}
+		lport, err := strconv.Atoi(port)
+		if err != nil {
+			return Addr{}, err
+		}
+		return Addr{IP: host, Port: uint32(lport)}, nil
+	}
+
+	laddr, err = parse(local)
+	if remote != "*.*" { // remote addr exists
+		raddr, err = parse(remote)
+		if err != nil {
+			return laddr, raddr, err
+		}
+	}
+
+	return laddr, raddr, err
+}
+
+// Return a list of network connections opened.
+func Connections(kind string) ([]ConnectionStat, error) {
+ return ConnectionsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+ var ret []ConnectionStat
+
+ args := []string{"-na"}
+ switch strings.ToLower(kind) {
+ default:
+ fallthrough
+ case "":
+ fallthrough
+ case "all":
+ fallthrough
+ case "inet":
+ // nothing to add
+ case "inet4":
+ args = append(args, "-finet")
+ case "inet6":
+ args = append(args, "-finet6")
+ case "tcp":
+ args = append(args, "-ptcp")
+ case "tcp4":
+ args = append(args, "-ptcp", "-finet")
+ case "tcp6":
+ args = append(args, "-ptcp", "-finet6")
+ case "udp":
+ args = append(args, "-pudp")
+ case "udp4":
+ args = append(args, "-pudp", "-finet")
+ case "udp6":
+ args = append(args, "-pudp", "-finet6")
+ case "unix":
+ return ret, common.ErrNotImplementedError
+ }
+
+ netstat, err := exec.LookPath("netstat")
+ if err != nil {
+ return nil, err
+ }
+ out, err := invoke.CommandWithContext(ctx, netstat, args...)
+ if err != nil {
+ return nil, err
+ }
+ lines := strings.Split(string(out), "\n")
+ for _, line := range lines {
+ if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) {
+ continue
+ }
+ n, err := parseNetstatLine(line)
+ if err != nil {
+ continue
+ }
+
+ ret = append(ret, n)
+ }
+
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go
new file mode 100644
index 000000000..2fd2224fa
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go
@@ -0,0 +1,224 @@
+//go:build freebsd || darwin
+// +build freebsd darwin
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// Return a list of network connections opened.
+func Connections(kind string) ([]ConnectionStat, error) {
+ return ConnectionsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+ return ConnectionsPid(kind, 0)
+}
+
+// Return a list of network connections opened returning at most `max`
+// connections for each running process.
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) {
+ return ConnectionsMaxWithContext(context.Background(), kind, max)
+}
+
+func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+ return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+// Return a list of network connections opened by a process.
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+ var ret []ConnectionStat
+
+ args := []string{"-i"}
+ switch strings.ToLower(kind) {
+ default:
+ fallthrough
+ case "":
+ fallthrough
+ case "all":
+ fallthrough
+ case "inet":
+ args = append(args, "tcp", "-i", "udp")
+ case "inet4":
+ args = append(args, "4")
+ case "inet6":
+ args = append(args, "6")
+ case "tcp":
+ args = append(args, "tcp")
+ case "tcp4":
+ args = append(args, "4tcp")
+ case "tcp6":
+ args = append(args, "6tcp")
+ case "udp":
+ args = append(args, "udp")
+ case "udp4":
+ args = append(args, "4udp")
+ case "udp6":
+ args = append(args, "6udp")
+ case "unix":
+ args = []string{"-U"}
+ }
+
+ r, err := common.CallLsofWithContext(ctx, invoke, pid, args...)
+ if err != nil {
+ return nil, err
+ }
+ for _, rr := range r {
+ if strings.HasPrefix(rr, "COMMAND") {
+ continue
+ }
+ n, err := parseNetLine(rr)
+ if err != nil {
+ continue
+ }
+
+ ret = append(ret, n)
+ }
+
+ return ret, nil
+}
+
+// constMap maps lsof column tokens to the matching syscall address-family
+// and socket-type constants.
+var constMap = map[string]int{
+	"unix": syscall.AF_UNIX,
+	"TCP":  syscall.SOCK_STREAM,
+	"UDP":  syscall.SOCK_DGRAM,
+	"IPv4": syscall.AF_INET,
+	"IPv6": syscall.AF_INET6,
+}
+
+// parseNetLine converts one line of `lsof -i` output into a
+// ConnectionStat. Fields used: f[1]=PID, f[3]=FD (with a trailing "u"
+// mode flag), f[4]=family token, f[7]=type token, f[8]=NAME,
+// f[9]=optional state in parentheses.
+func parseNetLine(line string) (ConnectionStat, error) {
+	f := strings.Fields(line)
+	if len(f) < 8 {
+		return ConnectionStat{}, fmt.Errorf("wrong line,%s", line)
+	}
+
+	// Unix-socket lines have no separate TYPE column: shift the NAME
+	// field to f[8] and force the type token to "unix".
+	if len(f) == 8 {
+		f = append(f, f[7])
+		f[7] = "unix"
+	}
+
+	pid, err := strconv.Atoi(f[1])
+	if err != nil {
+		return ConnectionStat{}, err
+	}
+	fd, err := strconv.Atoi(strings.Trim(f[3], "u"))
+	if err != nil {
+		return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3])
+	}
+	netFamily, ok := constMap[f[4]]
+	if !ok {
+		return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4])
+	}
+	netType, ok := constMap[f[7]]
+	if !ok {
+		return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7])
+	}
+
+	var laddr, raddr Addr
+	if f[7] == "unix" {
+		// For unix sockets NAME is the filesystem path; no remote addr.
+		laddr.IP = f[8]
+	} else {
+		laddr, raddr, err = parseNetAddr(f[8])
+		if err != nil {
+			return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8])
+		}
+	}
+
+	n := ConnectionStat{
+		Fd:     uint32(fd),
+		Family: uint32(netFamily),
+		Type:   uint32(netType),
+		Laddr:  laddr,
+		Raddr:  raddr,
+		Pid:    int32(pid),
+	}
+	if len(f) == 10 {
+		// e.g. "(ESTABLISHED)" — strip the parentheses.
+		n.Status = strings.Trim(f[9], "()")
+	}
+
+	return n, nil
+}
+
+// parseNetAddr parses an lsof NAME column of the form
+// "local->remote" (or just "local") into local/remote Addr values.
+// NOTE(review): strings.Split never returns an empty slice, so the
+// len(addrs) == 0 guard is unreachable; also the error from
+// parse(addrs[0]) can be overwritten by a successful parse(addrs[1]).
+func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) {
+	parse := func(l string) (Addr, error) {
+		host, port, err := net.SplitHostPort(l)
+		if err != nil {
+			return Addr{}, fmt.Errorf("wrong addr, %s", l)
+		}
+		lport, err := strconv.Atoi(port)
+		if err != nil {
+			return Addr{}, err
+		}
+		return Addr{IP: host, Port: uint32(lport)}, nil
+	}
+
+	addrs := strings.Split(line, "->")
+	if len(addrs) == 0 {
+		return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line)
+	}
+	laddr, err = parse(addrs[0])
+	if len(addrs) == 2 { // remote addr exists
+		raddr, err = parse(addrs[1])
+		if err != nil {
+			return laddr, raddr, err
+		}
+	}
+
+	return laddr, raddr, err
+}
+
+// Return up to `max` network connections opened by a process.
+func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+// Return a list of network connections opened, omitting `Uids`.
+// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be
+// removed from the API in the future.
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) {
+ return ConnectionsWithoutUidsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+ return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0)
+}
+
+func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max)
+}
+
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0)
+}
+
+func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max)
+}
+
+func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return []ConnectionStat{}, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
new file mode 100644
index 000000000..731c8f97b
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
@@ -0,0 +1,778 @@
+//go:build windows
+// +build windows
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
+ procGetExtendedTCPTable = modiphlpapi.NewProc("GetExtendedTcpTable")
+ procGetExtendedUDPTable = modiphlpapi.NewProc("GetExtendedUdpTable")
+ procGetIfEntry2 = modiphlpapi.NewProc("GetIfEntry2")
+)
+
+const (
+ TCPTableBasicListener = iota
+ TCPTableBasicConnections
+ TCPTableBasicAll
+ TCPTableOwnerPIDListener
+ TCPTableOwnerPIDConnections
+ TCPTableOwnerPIDAll
+ TCPTableOwnerModuleListener
+ TCPTableOwnerModuleConnections
+ TCPTableOwnerModuleAll
+)
+
+type netConnectionKindType struct {
+ family uint32
+ sockType uint32
+ filename string
+}
+
+var kindTCP4 = netConnectionKindType{
+ family: syscall.AF_INET,
+ sockType: syscall.SOCK_STREAM,
+ filename: "tcp",
+}
+
+var kindTCP6 = netConnectionKindType{
+ family: syscall.AF_INET6,
+ sockType: syscall.SOCK_STREAM,
+ filename: "tcp6",
+}
+
+var kindUDP4 = netConnectionKindType{
+ family: syscall.AF_INET,
+ sockType: syscall.SOCK_DGRAM,
+ filename: "udp",
+}
+
+var kindUDP6 = netConnectionKindType{
+ family: syscall.AF_INET6,
+ sockType: syscall.SOCK_DGRAM,
+ filename: "udp6",
+}
+
+var netConnectionKindMap = map[string][]netConnectionKindType{
+ "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6},
+ "tcp": {kindTCP4, kindTCP6},
+ "tcp4": {kindTCP4},
+ "tcp6": {kindTCP6},
+ "udp": {kindUDP4, kindUDP6},
+ "udp4": {kindUDP4},
+ "udp6": {kindUDP6},
+ "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6},
+ "inet4": {kindTCP4, kindUDP4},
+ "inet6": {kindTCP6, kindUDP6},
+}
+
+// https://github.com/microsoft/ethr/blob/aecdaf923970e5a9b4c461b4e2e3963d781ad2cc/plt_windows.go#L114-L170
+type guid struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
+
+const (
+ maxStringSize = 256
+ maxPhysAddressLength = 32
+ pad0for64_4for32 = 0
+)
+
+type mibIfRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ InterfaceGuid guid
+ Alias [maxStringSize + 1]uint16
+ Description [maxStringSize + 1]uint16
+ PhysicalAddressLength uint32
+ PhysicalAddress [maxPhysAddressLength]uint8
+ PermanentPhysicalAddress [maxPhysAddressLength]uint8
+ Mtu uint32
+ Type uint32
+ TunnelType uint32
+ MediaType uint32
+ PhysicalMediumType uint32
+ AccessType uint32
+ DirectionType uint32
+ InterfaceAndOperStatusFlags uint32
+ OperStatus uint32
+ AdminStatus uint32
+ MediaConnectState uint32
+ NetworkGuid guid
+ ConnectionType uint32
+ padding1 [pad0for64_4for32]byte
+ TransmitLinkSpeed uint64
+ ReceiveLinkSpeed uint64
+ InOctets uint64
+ InUcastPkts uint64
+ InNUcastPkts uint64
+ InDiscards uint64
+ InErrors uint64
+ InUnknownProtos uint64
+ InUcastOctets uint64
+ InMulticastOctets uint64
+ InBroadcastOctets uint64
+ OutOctets uint64
+ OutUcastPkts uint64
+ OutNUcastPkts uint64
+ OutDiscards uint64
+ OutErrors uint64
+ OutUcastOctets uint64
+ OutMulticastOctets uint64
+ OutBroadcastOctets uint64
+ OutQLen uint64
+}
+
+func IOCounters(pernic bool) ([]IOCountersStat, error) {
+ return IOCountersWithContext(context.Background(), pernic)
+}
+
+func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
+ ifs, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ var counters []IOCountersStat
+
+ err = procGetIfEntry2.Find()
+ if err == nil { // Vista+, uint64 values (issue#693)
+ for _, ifi := range ifs {
+ c := IOCountersStat{
+ Name: ifi.Name,
+ }
+
+ row := mibIfRow2{InterfaceIndex: uint32(ifi.Index)}
+ ret, _, err := procGetIfEntry2.Call(uintptr(unsafe.Pointer(&row)))
+ if ret != 0 {
+ return nil, os.NewSyscallError("GetIfEntry2", err)
+ }
+ c.BytesSent = uint64(row.OutOctets)
+ c.BytesRecv = uint64(row.InOctets)
+ c.PacketsSent = uint64(row.OutUcastPkts)
+ c.PacketsRecv = uint64(row.InUcastPkts)
+ c.Errin = uint64(row.InErrors)
+ c.Errout = uint64(row.OutErrors)
+ c.Dropin = uint64(row.InDiscards)
+ c.Dropout = uint64(row.OutDiscards)
+
+ counters = append(counters, c)
+ }
+ } else { // WinXP fallback, uint32 values
+ for _, ifi := range ifs {
+ c := IOCountersStat{
+ Name: ifi.Name,
+ }
+
+ row := windows.MibIfRow{Index: uint32(ifi.Index)}
+ err = windows.GetIfEntry(&row)
+ if err != nil {
+ return nil, os.NewSyscallError("GetIfEntry", err)
+ }
+ c.BytesSent = uint64(row.OutOctets)
+ c.BytesRecv = uint64(row.InOctets)
+ c.PacketsSent = uint64(row.OutUcastPkts)
+ c.PacketsRecv = uint64(row.InUcastPkts)
+ c.Errin = uint64(row.InErrors)
+ c.Errout = uint64(row.OutErrors)
+ c.Dropin = uint64(row.InDiscards)
+ c.Dropout = uint64(row.OutDiscards)
+
+ counters = append(counters, c)
+ }
+ }
+
+ if !pernic {
+ return getIOCountersAll(counters)
+ }
+ return counters, nil
+}
+
+// IOCountersByFile exists just for compatibility with Linux.
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
+ return IOCountersByFileWithContext(context.Background(), pernic, filename)
+}
+
+func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
+ return IOCounters(pernic)
+}
+
+// Return a list of network connections
+// Available kind:
+// reference to netConnectionKindMap
+func Connections(kind string) ([]ConnectionStat, error) {
+ return ConnectionsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+ return ConnectionsPidWithContext(ctx, kind, 0)
+}
+
+// ConnectionsPid Return a list of network connections opened by a process
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+ tmap, ok := netConnectionKindMap[kind]
+ if !ok {
+ return nil, fmt.Errorf("invalid kind, %s", kind)
+ }
+ return getProcInet(tmap, pid)
+}
+
+func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, error) {
+ stats := make([]ConnectionStat, 0)
+
+ for _, kind := range kinds {
+ s, err := getNetStatWithKind(kind)
+ if err != nil {
+ continue
+ }
+
+ if pid == 0 {
+ stats = append(stats, s...)
+ } else {
+ for _, ns := range s {
+ if ns.Pid != pid {
+ continue
+ }
+ stats = append(stats, ns)
+ }
+ }
+ }
+
+ return stats, nil
+}
+
+func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) {
+ if kindType.filename == "" {
+ return nil, fmt.Errorf("kind filename must be required")
+ }
+
+ switch kindType.filename {
+ case kindTCP4.filename:
+ return getTCPConnections(kindTCP4.family)
+ case kindTCP6.filename:
+ return getTCPConnections(kindTCP6.family)
+ case kindUDP4.filename:
+ return getUDPConnections(kindUDP4.family)
+ case kindUDP6.filename:
+ return getUDPConnections(kindUDP6.family)
+ }
+
+ return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename)
+}
+
+// Return a list of network connections opened returning at most `max`
+// connections for each running process.
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) {
+ return ConnectionsMaxWithContext(context.Background(), kind, max)
+}
+
+func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+ return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+// Return a list of network connections opened, omitting `Uids`.
+// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be
+// removed from the API in the future.
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) {
+ return ConnectionsWithoutUidsWithContext(context.Background(), kind)
+}
+
+func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) {
+ return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0)
+}
+
+func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max)
+}
+
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid)
+}
+
+func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0)
+}
+
+func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max)
+}
+
+func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max)
+}
+
+func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) {
+ return []ConnectionStat{}, common.ErrNotImplementedError
+}
+
+func FilterCounters() ([]FilterStat, error) {
+ return FilterCountersWithContext(context.Background())
+}
+
+func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
+ return ConntrackStatsWithContext(context.Background(), percpu)
+}
+
+func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+// NetProtoCounters returns network statistics for the entire system
+// If protocols is empty then all protocols are returned, otherwise
+// just the protocols in the list are returned.
+// Not Implemented for Windows
+func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
+ return ProtoCountersWithContext(context.Background(), protocols)
+}
+
+func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func getTableUintptr(family uint32, buf []byte) uintptr {
+ var (
+ pmibTCPTable pmibTCPTableOwnerPidAll
+ pmibTCP6Table pmibTCP6TableOwnerPidAll
+
+ p uintptr
+ )
+ switch family {
+ case kindTCP4.family:
+ if len(buf) > 0 {
+ pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0]))
+ p = uintptr(unsafe.Pointer(pmibTCPTable))
+ } else {
+ p = uintptr(unsafe.Pointer(pmibTCPTable))
+ }
+ case kindTCP6.family:
+ if len(buf) > 0 {
+ pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0]))
+ p = uintptr(unsafe.Pointer(pmibTCP6Table))
+ } else {
+ p = uintptr(unsafe.Pointer(pmibTCP6Table))
+ }
+ }
+ return p
+}
+
+// getTableInfo returns layout information for the MIB table matching
+// filename ("tcp", "tcp6", "udp", "udp6"):
+//
+//	index  — byte offset of the first row (size of the DwNumEntries header)
+//	step   — size in bytes of one row (Table is declared as a [1]row array)
+//	length — number of rows reported by DwNumEntries
+//
+// table must be the corresponding pmib*TableOwnerPid* pointer type or the
+// type assertion panics.
+func getTableInfo(filename string, table interface{}) (index, step, length int) {
+	switch filename {
+	case kindTCP4.filename:
+		index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries))
+		step = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).Table))
+		length = int(table.(pmibTCPTableOwnerPidAll).DwNumEntries)
+	case kindTCP6.filename:
+		index = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).DwNumEntries))
+		step = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).Table))
+		length = int(table.(pmibTCP6TableOwnerPidAll).DwNumEntries)
+	case kindUDP4.filename:
+		index = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).DwNumEntries))
+		step = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).Table))
+		length = int(table.(pmibUDPTableOwnerPid).DwNumEntries)
+	case kindUDP6.filename:
+		index = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).DwNumEntries))
+		step = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).Table))
+		length = int(table.(pmibUDP6TableOwnerPid).DwNumEntries)
+	}
+
+	return
+}
+
+// getTCPConnections returns all TCP connections for the given address
+// family (AF_INET or AF_INET6) by calling GetExtendedTcpTable with the
+// owner-PID-all table class, so each row carries the owning process ID.
+// family must be non-zero.
+func getTCPConnections(family uint32) ([]ConnectionStat, error) {
+	var (
+		p    uintptr
+		buf  []byte
+		size uint32
+
+		pmibTCPTable  pmibTCPTableOwnerPidAll
+		pmibTCP6Table pmibTCP6TableOwnerPidAll
+	)
+
+	if family == 0 {
+		return nil, fmt.Errorf("family must be required")
+	}
+
+	// Retry GetExtendedTcpTable with a growing buffer until it stops
+	// reporting ERROR_INSUFFICIENT_BUFFER; the call updates size with the
+	// required byte count.
+	for {
+		switch family {
+		case kindTCP4.family:
+			if len(buf) > 0 {
+				pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibTCPTable))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibTCPTable))
+			}
+		case kindTCP6.family:
+			if len(buf) > 0 {
+				pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibTCP6Table))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibTCP6Table))
+			}
+		}
+
+		err := getExtendedTcpTable(p,
+			&size,
+			true,
+			family,
+			tcpTableOwnerPidAll,
+			0)
+		if err == nil {
+			break
+		}
+		if err != windows.ERROR_INSUFFICIENT_BUFFER {
+			return nil, err
+		}
+		buf = make([]byte, size)
+	}
+
+	var (
+		index, step int
+		length      int
+	)
+
+	stats := make([]ConnectionStat, 0)
+	// index starts just past the DwNumEntries header; step is the size of
+	// one table row (see getTableInfo).
+	switch family {
+	case kindTCP4.family:
+		index, step, length = getTableInfo(kindTCP4.filename, pmibTCPTable)
+	case kindTCP6.family:
+		index, step, length = getTableInfo(kindTCP6.filename, pmibTCP6Table)
+	}
+
+	if length == 0 {
+		return nil, nil
+	}
+
+	for i := 0; i < length; i++ {
+		switch family {
+		case kindTCP4.family:
+			mibs := (*mibTCPRowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		case kindTCP6.family:
+			mibs := (*mibTCP6RowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		}
+
+		index += step
+	}
+	return stats, nil
+}
+
+// getUDPConnections returns all UDP endpoints for the given address
+// family (AF_INET or AF_INET6) by calling GetExtendedUdpTable with the
+// owner-PID table class, so each row carries the owning process ID.
+// family must be non-zero.
+func getUDPConnections(family uint32) ([]ConnectionStat, error) {
+	var (
+		p    uintptr
+		buf  []byte
+		size uint32
+
+		pmibUDPTable  pmibUDPTableOwnerPid
+		pmibUDP6Table pmibUDP6TableOwnerPid
+	)
+
+	if family == 0 {
+		return nil, fmt.Errorf("family must be required")
+	}
+
+	// Retry GetExtendedUdpTable with a growing buffer until it stops
+	// reporting ERROR_INSUFFICIENT_BUFFER; the call updates size with the
+	// required byte count.
+	for {
+		switch family {
+		case kindUDP4.family:
+			if len(buf) > 0 {
+				pmibUDPTable = (*mibUDPTableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibUDPTable))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibUDPTable))
+			}
+		case kindUDP6.family:
+			if len(buf) > 0 {
+				pmibUDP6Table = (*mibUDP6TableOwnerPid)(unsafe.Pointer(&buf[0]))
+				p = uintptr(unsafe.Pointer(pmibUDP6Table))
+			} else {
+				p = uintptr(unsafe.Pointer(pmibUDP6Table))
+			}
+		}
+
+		err := getExtendedUdpTable(
+			p,
+			&size,
+			true,
+			family,
+			udpTableOwnerPid,
+			0,
+		)
+		if err == nil {
+			break
+		}
+		if err != windows.ERROR_INSUFFICIENT_BUFFER {
+			return nil, err
+		}
+		buf = make([]byte, size)
+	}
+
+	var index, step, length int
+
+	stats := make([]ConnectionStat, 0)
+	// index starts just past the DwNumEntries header; step is the size of
+	// one table row (see getTableInfo).
+	switch family {
+	case kindUDP4.family:
+		index, step, length = getTableInfo(kindUDP4.filename, pmibUDPTable)
+	case kindUDP6.family:
+		index, step, length = getTableInfo(kindUDP6.filename, pmibUDP6Table)
+	}
+
+	if length == 0 {
+		return nil, nil
+	}
+
+	for i := 0; i < length; i++ {
+		switch family {
+		case kindUDP4.family:
+			mibs := (*mibUDPRowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		case kindUDP6.family:
+			mibs := (*mibUDP6RowOwnerPid)(unsafe.Pointer(&buf[index]))
+			ns := mibs.convertToConnectionStat()
+			stats = append(stats, ns)
+		}
+
+		index += step
+	}
+	return stats, nil
+}
+
+// tcpStatuses https://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx
+var tcpStatuses = map[mibTCPState]string{
+ 1: "CLOSED",
+ 2: "LISTEN",
+ 3: "SYN_SENT",
+ 4: "SYN_RECEIVED",
+ 5: "ESTABLISHED",
+ 6: "FIN_WAIT_1",
+ 7: "FIN_WAIT_2",
+ 8: "CLOSE_WAIT",
+ 9: "CLOSING",
+ 10: "LAST_ACK",
+ 11: "TIME_WAIT",
+ 12: "DELETE",
+}
+
+func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) {
+ r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved))
+ if r1 != 0 {
+ errcode = syscall.Errno(r1)
+ }
+ return
+}
+
+func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) {
+ r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved))
+ if r1 != 0 {
+ errcode = syscall.Errno(r1)
+ }
+ return
+}
+
+func getUintptrFromBool(b bool) uintptr {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+const anySize = 1
+
+// type MIB_TCP_STATE int32
+type mibTCPState int32
+
+type tcpTableClass int32
+
+const (
+ tcpTableBasicListener tcpTableClass = iota
+ tcpTableBasicConnections
+ tcpTableBasicAll
+ tcpTableOwnerPidListener
+ tcpTableOwnerPidConnections
+ tcpTableOwnerPidAll
+ tcpTableOwnerModuleListener
+ tcpTableOwnerModuleConnections
+ tcpTableOwnerModuleAll
+)
+
+type udpTableClass int32
+
+const (
+ udpTableBasic udpTableClass = iota
+ udpTableOwnerPid
+ udpTableOwnerModule
+)
+
+// TCP
+
+type mibTCPRowOwnerPid struct {
+ DwState uint32
+ DwLocalAddr uint32
+ DwLocalPort uint32
+ DwRemoteAddr uint32
+ DwRemotePort uint32
+ DwOwningPid uint32
+}
+
+func (m *mibTCPRowOwnerPid) convertToConnectionStat() ConnectionStat {
+ ns := ConnectionStat{
+ Family: kindTCP4.family,
+ Type: kindTCP4.sockType,
+ Laddr: Addr{
+ IP: parseIPv4HexString(m.DwLocalAddr),
+ Port: uint32(decodePort(m.DwLocalPort)),
+ },
+ Raddr: Addr{
+ IP: parseIPv4HexString(m.DwRemoteAddr),
+ Port: uint32(decodePort(m.DwRemotePort)),
+ },
+ Pid: int32(m.DwOwningPid),
+ Status: tcpStatuses[mibTCPState(m.DwState)],
+ }
+
+ return ns
+}
+
+type mibTCPTableOwnerPid struct {
+ DwNumEntries uint32
+ Table [anySize]mibTCPRowOwnerPid
+}
+
+type mibTCP6RowOwnerPid struct {
+ UcLocalAddr [16]byte
+ DwLocalScopeId uint32
+ DwLocalPort uint32
+ UcRemoteAddr [16]byte
+ DwRemoteScopeId uint32
+ DwRemotePort uint32
+ DwState uint32
+ DwOwningPid uint32
+}
+
+func (m *mibTCP6RowOwnerPid) convertToConnectionStat() ConnectionStat {
+ ns := ConnectionStat{
+ Family: kindTCP6.family,
+ Type: kindTCP6.sockType,
+ Laddr: Addr{
+ IP: parseIPv6HexString(m.UcLocalAddr),
+ Port: uint32(decodePort(m.DwLocalPort)),
+ },
+ Raddr: Addr{
+ IP: parseIPv6HexString(m.UcRemoteAddr),
+ Port: uint32(decodePort(m.DwRemotePort)),
+ },
+ Pid: int32(m.DwOwningPid),
+ Status: tcpStatuses[mibTCPState(m.DwState)],
+ }
+
+ return ns
+}
+
+type mibTCP6TableOwnerPid struct {
+ DwNumEntries uint32
+ Table [anySize]mibTCP6RowOwnerPid
+}
+
+type (
+ pmibTCPTableOwnerPidAll *mibTCPTableOwnerPid
+ pmibTCP6TableOwnerPidAll *mibTCP6TableOwnerPid
+)
+
+// UDP
+
+type mibUDPRowOwnerPid struct {
+ DwLocalAddr uint32
+ DwLocalPort uint32
+ DwOwningPid uint32
+}
+
+func (m *mibUDPRowOwnerPid) convertToConnectionStat() ConnectionStat {
+ ns := ConnectionStat{
+ Family: kindUDP4.family,
+ Type: kindUDP4.sockType,
+ Laddr: Addr{
+ IP: parseIPv4HexString(m.DwLocalAddr),
+ Port: uint32(decodePort(m.DwLocalPort)),
+ },
+ Pid: int32(m.DwOwningPid),
+ }
+
+ return ns
+}
+
+type mibUDPTableOwnerPid struct {
+ DwNumEntries uint32
+ Table [anySize]mibUDPRowOwnerPid
+}
+
+type mibUDP6RowOwnerPid struct {
+ UcLocalAddr [16]byte
+ DwLocalScopeId uint32
+ DwLocalPort uint32
+ DwOwningPid uint32
+}
+
+func (m *mibUDP6RowOwnerPid) convertToConnectionStat() ConnectionStat {
+ ns := ConnectionStat{
+ Family: kindUDP6.family,
+ Type: kindUDP6.sockType,
+ Laddr: Addr{
+ IP: parseIPv6HexString(m.UcLocalAddr),
+ Port: uint32(decodePort(m.DwLocalPort)),
+ },
+ Pid: int32(m.DwOwningPid),
+ }
+
+ return ns
+}
+
+type mibUDP6TableOwnerPid struct {
+ DwNumEntries uint32
+ Table [anySize]mibUDP6RowOwnerPid
+}
+
+type (
+ pmibUDPTableOwnerPid *mibUDPTableOwnerPid
+ pmibUDP6TableOwnerPid *mibUDP6TableOwnerPid
+)
+
+// decodePort converts a port stored in network byte order in the low 16
+// bits of a MIB row field to host byte order.
+func decodePort(port uint32) uint16 {
+	return syscall.Ntohs(uint16(port))
+}
+
+// parseIPv4HexString renders a DWORD IPv4 address as dotted-quad text;
+// the lowest byte of addr is the first octet.
+func parseIPv4HexString(addr uint32) string {
+	return fmt.Sprintf("%d.%d.%d.%d", addr&255, addr>>8&255, addr>>16&255, addr>>24&255)
+}
+
+// parseIPv6HexString renders a 16-byte IPv6 address in net.IP's canonical
+// string form.
+func parseIPv6HexString(addr [16]byte) string {
+	var ret [16]byte
+	for i := 0; i < 16; i++ {
+		ret[i] = uint8(addr[i])
+	}
+
+	// convert []byte to net.IP
+	ip := net.IP(ret[:])
+	return ip.String()
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process.go
new file mode 100644
index 000000000..0ca26c210
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process.go
@@ -0,0 +1,620 @@
+package process
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/mem"
+ "github.com/shirou/gopsutil/v3/net"
+)
+
+var (
+ invoke common.Invoker = common.Invoke{}
+ ErrorNoChildren = errors.New("process does not have children")
+ ErrorProcessNotRunning = errors.New("process does not exist")
+ ErrorNotPermitted = errors.New("operation not permitted")
+)
+
+type Process struct {
+ Pid int32 `json:"pid"`
+ name string
+ status string
+ parent int32
+ parentMutex sync.RWMutex // for windows ppid cache
+ numCtxSwitches *NumCtxSwitchesStat
+ uids []int32
+ gids []int32
+ groups []int32
+ numThreads int32
+ memInfo *MemoryInfoStat
+ sigInfo *SignalInfoStat
+ createTime int64
+
+ lastCPUTimes *cpu.TimesStat
+ lastCPUTime time.Time
+
+ tgid int32
+}
+
+// Process status
+const (
+ // Running marks a task a running or runnable (on the run queue)
+ Running = "running"
+ // Blocked marks a task waiting on a short, uninterruptible operation (usually I/O)
+ Blocked = "blocked"
+ // Idle marks a task sleeping for more than about 20 seconds
+ Idle = "idle"
+ // Lock marks a task waiting to acquire a lock
+ Lock = "lock"
+ // Sleep marks task waiting for short, interruptible operation
+ Sleep = "sleep"
+ // Stop marks a stopped process
+ Stop = "stop"
+ // Wait marks an idle interrupt thread (or paging in pre 2.6.xx Linux)
+ Wait = "wait"
+ // Zombie marks a defunct process, terminated but not reaped by its parent
+ Zombie = "zombie"
+
+ // Solaris states. See https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115
+ Daemon = "daemon"
+ Detached = "detached"
+ System = "system"
+ Orphan = "orphan"
+
+ UnknownState = ""
+)
+
+type OpenFilesStat struct {
+ Path string `json:"path"`
+ Fd uint64 `json:"fd"`
+}
+
+type MemoryInfoStat struct {
+ RSS uint64 `json:"rss"` // bytes
+ VMS uint64 `json:"vms"` // bytes
+ HWM uint64 `json:"hwm"` // bytes
+ Data uint64 `json:"data"` // bytes
+ Stack uint64 `json:"stack"` // bytes
+ Locked uint64 `json:"locked"` // bytes
+ Swap uint64 `json:"swap"` // bytes
+}
+
+type SignalInfoStat struct {
+ PendingProcess uint64 `json:"pending_process"`
+ PendingThread uint64 `json:"pending_thread"`
+ Blocked uint64 `json:"blocked"`
+ Ignored uint64 `json:"ignored"`
+ Caught uint64 `json:"caught"`
+}
+
+type RlimitStat struct {
+ Resource int32 `json:"resource"`
+ Soft uint64 `json:"soft"`
+ Hard uint64 `json:"hard"`
+ Used uint64 `json:"used"`
+}
+
+type IOCountersStat struct {
+ ReadCount uint64 `json:"readCount"`
+ WriteCount uint64 `json:"writeCount"`
+ ReadBytes uint64 `json:"readBytes"`
+ WriteBytes uint64 `json:"writeBytes"`
+}
+
+type NumCtxSwitchesStat struct {
+ Voluntary int64 `json:"voluntary"`
+ Involuntary int64 `json:"involuntary"`
+}
+
+type PageFaultsStat struct {
+ MinorFaults uint64 `json:"minorFaults"`
+ MajorFaults uint64 `json:"majorFaults"`
+ ChildMinorFaults uint64 `json:"childMinorFaults"`
+ ChildMajorFaults uint64 `json:"childMajorFaults"`
+}
+
+// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h
+// from libc6-dev package in Ubuntu 16.10
+const (
+ RLIMIT_CPU int32 = 0
+ RLIMIT_FSIZE int32 = 1
+ RLIMIT_DATA int32 = 2
+ RLIMIT_STACK int32 = 3
+ RLIMIT_CORE int32 = 4
+ RLIMIT_RSS int32 = 5
+ RLIMIT_NPROC int32 = 6
+ RLIMIT_NOFILE int32 = 7
+ RLIMIT_MEMLOCK int32 = 8
+ RLIMIT_AS int32 = 9
+ RLIMIT_LOCKS int32 = 10
+ RLIMIT_SIGPENDING int32 = 11
+ RLIMIT_MSGQUEUE int32 = 12
+ RLIMIT_NICE int32 = 13
+ RLIMIT_RTPRIO int32 = 14
+ RLIMIT_RTTIME int32 = 15
+)
+
+func (p Process) String() string {
+ s, _ := json.Marshal(p)
+ return string(s)
+}
+
+func (o OpenFilesStat) String() string {
+ s, _ := json.Marshal(o)
+ return string(s)
+}
+
+func (m MemoryInfoStat) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
+
+func (r RlimitStat) String() string {
+ s, _ := json.Marshal(r)
+ return string(s)
+}
+
+func (i IOCountersStat) String() string {
+ s, _ := json.Marshal(i)
+ return string(s)
+}
+
+func (p NumCtxSwitchesStat) String() string {
+ s, _ := json.Marshal(p)
+ return string(s)
+}
+
+// Pids returns a slice of process ID list which are running now.
+func Pids() ([]int32, error) {
+ return PidsWithContext(context.Background())
+}
+
+func PidsWithContext(ctx context.Context) ([]int32, error) {
+ pids, err := pidsWithContext(ctx)
+ sort.Slice(pids, func(i, j int) bool { return pids[i] < pids[j] })
+ return pids, err
+}
+
+// Processes returns a slice of pointers to Process structs for all
+// currently running processes.
+func Processes() ([]*Process, error) {
+ return ProcessesWithContext(context.Background())
+}
+
+// NewProcess creates a new Process instance, it only stores the pid and
+// checks that the process exists. Other methods on Process can be used
+// to get more information about the process. An error will be returned
+// if the process does not exist.
+func NewProcess(pid int32) (*Process, error) {
+ return NewProcessWithContext(context.Background(), pid)
+}
+
+func NewProcessWithContext(ctx context.Context, pid int32) (*Process, error) {
+ p := &Process{
+ Pid: pid,
+ }
+
+ exists, err := PidExistsWithContext(ctx, pid)
+ if err != nil {
+ return p, err
+ }
+ if !exists {
+ return p, ErrorProcessNotRunning
+ }
+ p.CreateTimeWithContext(ctx)
+ return p, nil
+}
+
+func PidExists(pid int32) (bool, error) {
+ return PidExistsWithContext(context.Background(), pid)
+}
+
+// Background returns true if the process is in background, false otherwise.
+func (p *Process) Background() (bool, error) {
+ return p.BackgroundWithContext(context.Background())
+}
+
+func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) {
+ fg, err := p.ForegroundWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ return !fg, err
+}
+
+// If interval is 0, return difference from last call(non-blocking).
+// If interval > 0, wait interval sec and return difference between start and end.
+func (p *Process) Percent(interval time.Duration) (float64, error) {
+ return p.PercentWithContext(context.Background(), interval)
+}
+
+func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) {
+ cpuTimes, err := p.TimesWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ now := time.Now()
+
+ if interval > 0 {
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ if err := common.Sleep(ctx, interval); err != nil {
+ return 0, err
+ }
+ cpuTimes, err = p.TimesWithContext(ctx)
+ now = time.Now()
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ if p.lastCPUTimes == nil {
+ // invoked first time
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ return 0, nil
+ }
+ }
+
+ numcpu := runtime.NumCPU()
+ delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu)
+ ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu)
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ return ret, nil
+}
+
+// IsRunning returns whether the process is still running or not.
+func (p *Process) IsRunning() (bool, error) {
+ return p.IsRunningWithContext(context.Background())
+}
+
+func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) {
+ createTime, err := p.CreateTimeWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ p2, err := NewProcessWithContext(ctx, p.Pid)
+ if errors.Is(err, ErrorProcessNotRunning) {
+ return false, nil
+ }
+ createTime2, err := p2.CreateTimeWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ return createTime == createTime2, nil
+}
+
+// CreateTime returns created time of the process in milliseconds since the epoch, in UTC.
+func (p *Process) CreateTime() (int64, error) {
+ return p.CreateTimeWithContext(context.Background())
+}
+
+func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) {
+ if p.createTime != 0 {
+ return p.createTime, nil
+ }
+ createTime, err := p.createTimeWithContext(ctx)
+ p.createTime = createTime
+ return p.createTime, err
+}
+
+func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 {
+ if delta == 0 {
+ return 0
+ }
+ delta_proc := t2.Total() - t1.Total()
+ overall_percent := ((delta_proc / delta) * 100) * float64(numcpu)
+ return overall_percent
+}
+
+// MemoryPercent returns how many percent of the total RAM this process uses
+func (p *Process) MemoryPercent() (float32, error) {
+ return p.MemoryPercentWithContext(context.Background())
+}
+
+func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) {
+ machineMemory, err := mem.VirtualMemoryWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ total := machineMemory.Total
+
+ processMemory, err := p.MemoryInfoWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ used := processMemory.RSS
+
+ return (100 * float32(used) / float32(total)), nil
+}
+
+// CPUPercent returns how many percent of the CPU time this process uses
+func (p *Process) CPUPercent() (float64, error) {
+ return p.CPUPercentWithContext(context.Background())
+}
+
+func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) {
+ crt_time, err := p.createTimeWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+
+ cput, err := p.TimesWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+
+ created := time.Unix(0, crt_time*int64(time.Millisecond))
+ totalTime := time.Since(created).Seconds()
+ if totalTime <= 0 {
+ return 0, nil
+ }
+
+ return 100 * cput.Total() / totalTime, nil
+}
+
+// Groups returns all group IDs(include supplementary groups) of the process as a slice of the int
+func (p *Process) Groups() ([]int32, error) {
+ return p.GroupsWithContext(context.Background())
+}
+
+// Ppid returns Parent Process ID of the process.
+func (p *Process) Ppid() (int32, error) {
+ return p.PpidWithContext(context.Background())
+}
+
+// Name returns name of the process.
+func (p *Process) Name() (string, error) {
+ return p.NameWithContext(context.Background())
+}
+
+// Exe returns executable path of the process.
+func (p *Process) Exe() (string, error) {
+ return p.ExeWithContext(context.Background())
+}
+
+// Cmdline returns the command line arguments of the process as a string with
+// each argument separated by 0x20 ascii character.
+func (p *Process) Cmdline() (string, error) {
+ return p.CmdlineWithContext(context.Background())
+}
+
+// CmdlineSlice returns the command line arguments of the process as a slice with each
+// element being an argument.
+func (p *Process) CmdlineSlice() ([]string, error) {
+ return p.CmdlineSliceWithContext(context.Background())
+}
+
+// Cwd returns current working directory of the process.
+func (p *Process) Cwd() (string, error) {
+ return p.CwdWithContext(context.Background())
+}
+
+// Parent returns parent Process of the process.
+func (p *Process) Parent() (*Process, error) {
+ return p.ParentWithContext(context.Background())
+}
+
+// ParentWithContext returns parent Process of the process.
+func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) {
+ ppid, err := p.PpidWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return NewProcessWithContext(ctx, ppid)
+}
+
+// Status returns the process status.
+// Return value could be one of these.
+// R: Running S: Sleep T: Stop I: Idle
+// Z: Zombie W: Wait L: Lock
+// The character is same within all supported platforms.
+func (p *Process) Status() ([]string, error) {
+ return p.StatusWithContext(context.Background())
+}
+
+// Foreground returns true if the process is in foreground, false otherwise.
+func (p *Process) Foreground() (bool, error) {
+ return p.ForegroundWithContext(context.Background())
+}
+
+// Uids returns user ids of the process as a slice of the int
+func (p *Process) Uids() ([]int32, error) {
+ return p.UidsWithContext(context.Background())
+}
+
+// Gids returns group ids of the process as a slice of the int
+func (p *Process) Gids() ([]int32, error) {
+ return p.GidsWithContext(context.Background())
+}
+
+// Terminal returns a terminal which is associated with the process.
+func (p *Process) Terminal() (string, error) {
+ return p.TerminalWithContext(context.Background())
+}
+
+// Nice returns a nice value (priority).
+func (p *Process) Nice() (int32, error) {
+ return p.NiceWithContext(context.Background())
+}
+
+// IOnice returns process I/O nice value (priority).
+func (p *Process) IOnice() (int32, error) {
+ return p.IOniceWithContext(context.Background())
+}
+
+// Rlimit returns Resource Limits.
+func (p *Process) Rlimit() ([]RlimitStat, error) {
+ return p.RlimitWithContext(context.Background())
+}
+
+// RlimitUsage returns Resource Limits.
+// If gatherUsed is true, the currently used value will be gathered and added
+// to the resulting RlimitStat.
+func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) {
+ return p.RlimitUsageWithContext(context.Background(), gatherUsed)
+}
+
+// IOCounters returns IO Counters.
+func (p *Process) IOCounters() (*IOCountersStat, error) {
+ return p.IOCountersWithContext(context.Background())
+}
+
+// NumCtxSwitches returns the number of the context switches of the process.
+func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) {
+ return p.NumCtxSwitchesWithContext(context.Background())
+}
+
+// NumFDs returns the number of File Descriptors used by the process.
+func (p *Process) NumFDs() (int32, error) {
+ return p.NumFDsWithContext(context.Background())
+}
+
+// NumThreads returns the number of threads used by the process.
+func (p *Process) NumThreads() (int32, error) {
+ return p.NumThreadsWithContext(context.Background())
+}
+
+func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) {
+ return p.ThreadsWithContext(context.Background())
+}
+
+// Times returns CPU times of the process.
+func (p *Process) Times() (*cpu.TimesStat, error) {
+ return p.TimesWithContext(context.Background())
+}
+
+// CPUAffinity returns CPU affinity of the process.
+func (p *Process) CPUAffinity() ([]int32, error) {
+ return p.CPUAffinityWithContext(context.Background())
+}
+
+// MemoryInfo returns generic process memory information,
+// such as RSS and VMS.
+func (p *Process) MemoryInfo() (*MemoryInfoStat, error) {
+ return p.MemoryInfoWithContext(context.Background())
+}
+
+// MemoryInfoEx returns platform-specific process memory information.
+func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) {
+ return p.MemoryInfoExWithContext(context.Background())
+}
+
+// PageFaults returns the process's page fault counters.
+func (p *Process) PageFaults() (*PageFaultsStat, error) {
+ return p.PageFaultsWithContext(context.Background())
+}
+
+// Children returns the children of the process represented as a slice
+// of pointers to Process type.
+func (p *Process) Children() ([]*Process, error) {
+ return p.ChildrenWithContext(context.Background())
+}
+
+// OpenFiles returns a slice of OpenFilesStat opened by the process.
+// OpenFilesStat includes a file path and file descriptor.
+func (p *Process) OpenFiles() ([]OpenFilesStat, error) {
+ return p.OpenFilesWithContext(context.Background())
+}
+
+// Connections returns a slice of net.ConnectionStat used by the process.
+// This returns all kind of the connection. This means TCP, UDP or UNIX.
+func (p *Process) Connections() ([]net.ConnectionStat, error) {
+ return p.ConnectionsWithContext(context.Background())
+}
+
+// ConnectionsMax returns a slice of net.ConnectionStat used by the process, at most `max` entries.
+func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) {
+ return p.ConnectionsMaxWithContext(context.Background(), max)
+}
+
+// MemoryMaps get memory maps from /proc/(pid)/smaps
+func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) {
+ return p.MemoryMapsWithContext(context.Background(), grouped)
+}
+
+// Tgid returns thread group id of the process.
+func (p *Process) Tgid() (int32, error) {
+ return p.TgidWithContext(context.Background())
+}
+
+// SendSignal sends a unix.Signal to the process.
+func (p *Process) SendSignal(sig Signal) error {
+ return p.SendSignalWithContext(context.Background(), sig)
+}
+
+// Suspend sends SIGSTOP to the process.
+func (p *Process) Suspend() error {
+ return p.SuspendWithContext(context.Background())
+}
+
+// Resume sends SIGCONT to the process.
+func (p *Process) Resume() error {
+ return p.ResumeWithContext(context.Background())
+}
+
+// Terminate sends SIGTERM to the process.
+func (p *Process) Terminate() error {
+ return p.TerminateWithContext(context.Background())
+}
+
+// Kill sends SIGKILL to the process.
+func (p *Process) Kill() error {
+ return p.KillWithContext(context.Background())
+}
+
+// Username returns a username of the process.
+func (p *Process) Username() (string, error) {
+ return p.UsernameWithContext(context.Background())
+}
+
+// Environ returns the environment variables of the process.
+func (p *Process) Environ() ([]string, error) {
+ return p.EnvironWithContext(context.Background())
+}
+
+// convertStatusChar as reported by the ps command across different platforms.
+func convertStatusChar(letter string) string {
+ // Sources
+ // Darwin: http://www.mywebuniversity.com/Man_Pages/Darwin/man_ps.html
+ // FreeBSD: https://www.freebsd.org/cgi/man.cgi?ps
+ // Linux https://man7.org/linux/man-pages/man1/ps.1.html
+ // OpenBSD: https://man.openbsd.org/ps.1#state
+ // Solaris: https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115
+ switch letter {
+ case "A":
+ return Daemon
+ case "D", "U":
+ return Blocked
+ case "E":
+ return Detached
+ case "I":
+ return Idle
+ case "L":
+ return Lock
+ case "O":
+ return Orphan
+ case "R":
+ return Running
+ case "S":
+ return Sleep
+ case "T", "t":
+ // "t" is used by Linux to signal stopped by the debugger during tracing
+ return Stop
+ case "W":
+ return Wait
+ case "Y":
+ return System
+ case "Z":
+ return Zombie
+ default:
+ return UnknownState
+ }
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go
new file mode 100644
index 000000000..263829ffa
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go
@@ -0,0 +1,76 @@
+//go:build darwin || freebsd || openbsd
+// +build darwin freebsd openbsd
+
+package process
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+type MemoryInfoExStat struct{}
+
+type MemoryMapsStat struct{}
+
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func parseKinfoProc(buf []byte) (KinfoProc, error) {
+ var k KinfoProc
+ br := bytes.NewReader(buf)
+ err := common.Read(br, binary.LittleEndian, &k)
+ return k, err
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go
new file mode 100644
index 000000000..61b340b63
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go
@@ -0,0 +1,326 @@
+//go:build darwin
+// +build darwin
+
+package process
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+// copied from sys/sysctl.h
+const (
+ CTLKern = 1 // "high kernel": proc, limits
+ KernProc = 14 // struct: process entries
+ KernProcPID = 1 // by process id
+ KernProcProc = 8 // only return procs
+ KernProcAll = 0 // everything
+ KernProcPathname = 12 // path to executable
+)
+
+var clockTicks = 100 // default value
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ clockTicks = int(clkTck)
+ }
+}
+
+type _Ctype_struct___0 struct {
+ Pad uint64
+}
+
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+ var ret []int32
+
+ kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
+ if err != nil {
+ return ret, err
+ }
+
+ for _, proc := range kprocs {
+ ret = append(ret, int32(proc.Proc.P_pid))
+ }
+
+ return ret, nil
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+
+ return k.Eproc.Ppid, nil
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+
+ name := common.ByteToString(k.Proc.P_comm[:])
+
+ if len(name) >= 15 {
+ cmdName, err := p.cmdNameWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(cmdName) > 0 {
+ extendedName := filepath.Base(cmdName)
+ if strings.HasPrefix(extendedName, p.name) {
+ name = extendedName
+ } else {
+ name = cmdName
+ }
+ }
+ }
+
+ return name, nil
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+
+ return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ r, err := callPsWithContext(ctx, "state", p.Pid, false, false)
+ if err != nil {
+ return []string{""}, err
+ }
+ status := convertStatusChar(r[0][0][0:1])
+ return []string{status}, err
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details
+ pid := p.Pid
+ out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid)))
+ if err != nil {
+ return false, err
+ }
+ return strings.IndexByte(string(out), '+') != -1, nil
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html
+ userEffectiveUID := int32(k.Eproc.Ucred.Uid)
+
+ return []int32{userEffectiveUID}, nil
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ gids := make([]int32, 0, 3)
+ gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_svgid))
+
+ return gids, nil
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+ // k, err := p.getKProc()
+ // if err != nil {
+ // return nil, err
+ // }
+
+ // groups := make([]int32, k.Eproc.Ucred.Ngroups)
+ // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ {
+ // groups[i] = int32(k.Eproc.Ucred.Groups[i])
+ // }
+
+ // return groups, nil
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+ /*
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+
+ ttyNr := uint64(k.Eproc.Tdev)
+ termmap, err := getTerminalMap()
+ if err != nil {
+ return "", err
+ }
+
+ return termmap[ttyNr], nil
+ */
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+ return int32(k.Proc.P_nice), nil
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func convertCPUTimes(s string) (ret float64, err error) {
+ var t int
+ var _tmp string
+ if strings.Contains(s, ":") {
+ _t := strings.Split(s, ":")
+ switch len(_t) {
+ case 3:
+ hour, err := strconv.Atoi(_t[0])
+ if err != nil {
+ return ret, err
+ }
+ t += hour * 60 * 60 * clockTicks
+
+ mins, err := strconv.Atoi(_t[1])
+ if err != nil {
+ return ret, err
+ }
+ t += mins * 60 * clockTicks
+ _tmp = _t[2]
+ case 2:
+ mins, err := strconv.Atoi(_t[0])
+ if err != nil {
+ return ret, err
+ }
+ t += mins * 60 * clockTicks
+ _tmp = _t[1]
+ case 1, 0:
+ _tmp = s
+ default:
+ return ret, fmt.Errorf("wrong cpu time string")
+ }
+ } else {
+ _tmp = s
+ }
+
+ _t := strings.Split(_tmp, ".")
+ if err != nil {
+ return ret, err
+ }
+ h, err := strconv.Atoi(_t[0])
+ t += h * clockTicks
+ h, err = strconv.Atoi(_t[1])
+ t += h
+ return float64(t) / float64(clockTicks), nil
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+ pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]*Process, 0, len(pids))
+ for _, pid := range pids {
+ np, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, np)
+ }
+ return ret, nil
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+ return net.ConnectionsPidWithContext(ctx, "all", p.Pid)
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+ return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max)
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ out := []*Process{}
+
+ pids, err := PidsWithContext(ctx)
+ if err != nil {
+ return out, err
+ }
+
+ for _, pid := range pids {
+ p, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ continue
+ }
+ out = append(out, p)
+ }
+
+ return out, nil
+}
+
+// Returns a proc as defined here:
+// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html
+func (p *Process) getKProc() (*unix.KinfoProc, error) {
+ return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid))
+}
+
+// call ps command.
+// The return value omits the header line (callers must not pass a wrong arg),
+// and each line is split on spaces. The caller is responsible for managing the result.
+// If passed arg pid is 0, get information from all process.
+func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) {
+ var cmd []string
+ if pid == 0 { // will get from all processes.
+ cmd = []string{"-ax", "-o", arg}
+ } else if threadOption {
+ cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))}
+ } else {
+ cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))}
+ }
+ if nameOption {
+ cmd = append(cmd, "-c")
+ }
+ out, err := invoke.CommandWithContext(ctx, "ps", cmd...)
+ if err != nil {
+ return [][]string{}, err
+ }
+ lines := strings.Split(string(out), "\n")
+
+ var ret [][]string
+ for _, l := range lines[1:] {
+ var lr []string
+ if nameOption {
+ lr = append(lr, l)
+ } else {
+ for _, r := range strings.Split(l, " ") {
+ if r == "" {
+ continue
+ }
+ lr = append(lr, strings.TrimSpace(r))
+ }
+ }
+ if len(lr) != 0 {
+ ret = append(ret, lr)
+ }
+ }
+
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go
new file mode 100644
index 000000000..b353e5eac
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go
@@ -0,0 +1,236 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_darwin.go
+
+package process
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+ Pad_cgo_0 [4]byte
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type UGid_t uint32
+
+type KinfoProc struct {
+ Proc ExternProc
+ Eproc Eproc
+}
+
+type Eproc struct {
+ Paddr *uint64
+ Sess *Session
+ Pcred Upcred
+ Ucred Uucred
+ Pad_cgo_0 [4]byte
+ Vm Vmspace
+ Ppid int32
+ Pgid int32
+ Jobc int16
+ Pad_cgo_1 [2]byte
+ Tdev int32
+ Tpgid int32
+ Pad_cgo_2 [4]byte
+ Tsess *Session
+ Wmesg [8]int8
+ Xsize int32
+ Xrssize int16
+ Xccount int16
+ Xswrss int16
+ Pad_cgo_3 [2]byte
+ Flag int32
+ Login [12]int8
+ Spare [4]int32
+ Pad_cgo_4 [4]byte
+}
+
+type Proc struct{}
+
+type Session struct{}
+
+type ucred struct {
+ Link _Ctype_struct___0
+ Ref uint64
+ Posix Posix_cred
+ Label *Label
+ Audit Au_session
+}
+
+type Uucred struct {
+ Ref int32
+ UID uint32
+ Ngroups int16
+ Pad_cgo_0 [2]byte
+ Groups [16]uint32
+}
+
+type Upcred struct {
+ Pc_lock [72]int8
+ Pc_ucred *ucred
+ P_ruid uint32
+ P_svuid uint32
+ P_rgid uint32
+ P_svgid uint32
+ P_refcnt int32
+ Pad_cgo_0 [4]byte
+}
+
+type Vmspace struct {
+ Dummy int32
+ Pad_cgo_0 [4]byte
+ Dummy2 *int8
+ Dummy3 [5]int32
+ Pad_cgo_1 [4]byte
+ Dummy4 [3]*int8
+}
+
+type Sigacts struct{}
+
+type ExternProc struct {
+ P_un [16]byte
+ P_vmspace uint64
+ P_sigacts uint64
+ Pad_cgo_0 [3]byte
+ P_flag int32
+ P_stat int8
+ P_pid int32
+ P_oppid int32
+ P_dupfd int32
+ Pad_cgo_1 [4]byte
+ User_stack uint64
+ Exit_thread uint64
+ P_debugger int32
+ Sigwait int32
+ P_estcpu uint32
+ P_cpticks int32
+ P_pctcpu uint32
+ Pad_cgo_2 [4]byte
+ P_wchan uint64
+ P_wmesg uint64
+ P_swtime uint32
+ P_slptime uint32
+ P_realtimer Itimerval
+ P_rtime Timeval
+ P_uticks uint64
+ P_sticks uint64
+ P_iticks uint64
+ P_traceflag int32
+ Pad_cgo_3 [4]byte
+ P_tracep uint64
+ P_siglist int32
+ Pad_cgo_4 [4]byte
+ P_textvp uint64
+ P_holdcnt int32
+ P_sigmask uint32
+ P_sigignore uint32
+ P_sigcatch uint32
+ P_priority uint8
+ P_usrpri uint8
+ P_nice int8
+ P_comm [17]int8
+ Pad_cgo_5 [4]byte
+ P_pgrp uint64
+ P_addr uint64
+ P_xstat uint16
+ P_acflag uint16
+ Pad_cgo_6 [4]byte
+ P_ru uint64
+}
+
+type Itimerval struct {
+ Interval Timeval
+ Value Timeval
+}
+
+type Vnode struct{}
+
+type Pgrp struct{}
+
+type UserStruct struct{}
+
+type Au_session struct {
+ Aia_p *AuditinfoAddr
+ Mask AuMask
+}
+
+type Posix_cred struct {
+ UID uint32
+ Ruid uint32
+ Svuid uint32
+ Ngroups int16
+ Pad_cgo_0 [2]byte
+ Groups [16]uint32
+ Rgid uint32
+ Svgid uint32
+ Gmuid uint32
+ Flags int32
+}
+
+type Label struct{}
+
+type AuditinfoAddr struct {
+ Auid uint32
+ Mask AuMask
+ Termid AuTidAddr
+ Asid int32
+ Flags uint64
+}
+
+type AuMask struct {
+ Success uint32
+ Failure uint32
+}
+
+type AuTidAddr struct {
+ Port int32
+ Type uint32
+ Addr [4]uint32
+}
+
+type UcredQueue struct {
+ Next *ucred
+ Prev **ucred
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go
new file mode 100644
index 000000000..cbd6bdc79
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go
@@ -0,0 +1,213 @@
+//go:build darwin && arm64
+// +build darwin,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs process/types_darwin.go
+
+package process
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+ Pad_cgo_0 [4]byte
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type UGid_t uint32
+
+type KinfoProc struct {
+ Proc ExternProc
+ Eproc Eproc
+}
+
+type Eproc struct {
+ Paddr *Proc
+ Sess *Session
+ Pcred Upcred
+ Ucred Uucred
+ Vm Vmspace
+ Ppid int32
+ Pgid int32
+ Jobc int16
+ Tdev int32
+ Tpgid int32
+ Tsess *Session
+ Wmesg [8]int8
+ Xsize int32
+ Xrssize int16
+ Xccount int16
+ Xswrss int16
+ Flag int32
+ Login [12]int8
+ Spare [4]int32
+ Pad_cgo_0 [4]byte
+}
+
+type Proc struct{}
+
+type Session struct{}
+
+type ucred struct{}
+
+type Uucred struct {
+ Ref int32
+ UID uint32
+ Ngroups int16
+ Groups [16]uint32
+}
+
+type Upcred struct {
+ Pc_lock [72]int8
+ Pc_ucred *ucred
+ P_ruid uint32
+ P_svuid uint32
+ P_rgid uint32
+ P_svgid uint32
+ P_refcnt int32
+ Pad_cgo_0 [4]byte
+}
+
+type Vmspace struct {
+ Dummy int32
+ Dummy2 *int8
+ Dummy3 [5]int32
+ Dummy4 [3]*int8
+}
+
+type Sigacts struct{}
+
+type ExternProc struct {
+ P_un [16]byte
+ P_vmspace uint64
+ P_sigacts uint64
+ Pad_cgo_0 [3]byte
+ P_flag int32
+ P_stat int8
+ P_pid int32
+ P_oppid int32
+ P_dupfd int32
+ Pad_cgo_1 [4]byte
+ User_stack uint64
+ Exit_thread uint64
+ P_debugger int32
+ Sigwait int32
+ P_estcpu uint32
+ P_cpticks int32
+ P_pctcpu uint32
+ Pad_cgo_2 [4]byte
+ P_wchan uint64
+ P_wmesg uint64
+ P_swtime uint32
+ P_slptime uint32
+ P_realtimer Itimerval
+ P_rtime Timeval
+ P_uticks uint64
+ P_sticks uint64
+ P_iticks uint64
+ P_traceflag int32
+ Pad_cgo_3 [4]byte
+ P_tracep uint64
+ P_siglist int32
+ Pad_cgo_4 [4]byte
+ P_textvp uint64
+ P_holdcnt int32
+ P_sigmask uint32
+ P_sigignore uint32
+ P_sigcatch uint32
+ P_priority uint8
+ P_usrpri uint8
+ P_nice int8
+ P_comm [17]int8
+ Pad_cgo_5 [4]byte
+ P_pgrp uint64
+ P_addr uint64
+ P_xstat uint16
+ P_acflag uint16
+ Pad_cgo_6 [4]byte
+ P_ru uint64
+}
+
+type Itimerval struct {
+ Interval Timeval
+ Value Timeval
+}
+
+type Vnode struct{}
+
+type Pgrp struct{}
+
+type UserStruct struct{}
+
+type Au_session struct {
+ Aia_p *AuditinfoAddr
+ Mask AuMask
+}
+
+type Posix_cred struct{}
+
+type Label struct{}
+
+type AuditinfoAddr struct {
+ Auid uint32
+ Mask AuMask
+ Termid AuTidAddr
+ Asid int32
+ Flags uint64
+}
+type AuMask struct {
+ Success uint32
+ Failure uint32
+}
+type AuTidAddr struct {
+ Port int32
+ Type uint32
+ Addr [4]uint32
+}
+
+type UcredQueue struct {
+ Next *ucred
+ Prev **ucred
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go
new file mode 100644
index 000000000..2ac413f10
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go
@@ -0,0 +1,219 @@
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package process
+
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+import "C"
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+)
+
+var (
+ argMax int
+ timescaleToNanoSeconds float64
+)
+
+func init() {
+ argMax = getArgMax()
+ timescaleToNanoSeconds = getTimeScaleToNanoSeconds()
+}
+
+func getArgMax() int {
+ var (
+ mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX}
+ argmax C.int
+ size C.size_t = C.ulong(unsafe.Sizeof(argmax))
+ )
+ retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0)
+ if retval == 0 {
+ return int(argmax)
+ }
+ return 0
+}
+
+func getTimeScaleToNanoSeconds() float64 {
+ var timeBaseInfo C.struct_mach_timebase_info
+
+ C.mach_timebase_info(&timeBaseInfo)
+
+ return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom)
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ var c C.char // need a var for unsafe.Sizeof need a var
+ const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c)
+ buffer := (*C.char)(C.malloc(C.size_t(bufsize)))
+ defer C.free(unsafe.Pointer(buffer))
+
+ ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize))
+ if err != nil {
+ return "", err
+ }
+ if ret <= 0 {
+ return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret)
+ }
+
+ return C.GoString(buffer), nil
+}
+
+// CwdWithContext retrieves the Current Working Directory for the given process.
+// It uses the proc_pidinfo from libproc and will only work for processes the
+// EUID can access. Otherwise "operation not permitted" will be returned as the
+// error.
+// Note: This might also work for other *BSD OSs.
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ const vpiSize = C.sizeof_struct_proc_vnodepathinfo
+ vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize))
+ defer C.free(unsafe.Pointer(vpi))
+ ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize)
+ if err != nil {
+ // fmt.Printf("ret: %d %T\n", ret, err)
+ if err == syscall.EPERM {
+ return "", ErrorNotPermitted
+ }
+ return "", err
+ }
+ if ret <= 0 {
+ return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret)
+ }
+ if ret != C.sizeof_struct_proc_vnodepathinfo {
+ return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret)
+ }
+ return C.GoString(&vpi.pvi_cdir.vip_path[0]), err
+}
+
+func procArgs(pid int32) ([]byte, int, error) {
+ var (
+ mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}
+ size C.size_t = C.ulong(argMax)
+ nargs C.int
+ result []byte
+ )
+ procargs := (*C.char)(C.malloc(C.ulong(argMax)))
+ defer C.free(unsafe.Pointer(procargs))
+ retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0)
+ if retval == 0 {
+ C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int)
+ result = C.GoBytes(unsafe.Pointer(procargs), C.int(size))
+ // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result))
+ return result, int(nargs), nil
+ }
+ return nil, 0, err
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ return p.cmdlineSliceWithContext(ctx, true)
+}
+
+func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) {
+ pargs, nargs, err := procArgs(p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ // The first bytes hold the nargs int, skip it.
+ args := bytes.Split((pargs)[C.sizeof_int:], []byte{0})
+ var argStr string
+ // The first element is the actual binary/command path.
+ // command := args[0]
+ var argSlice []string
+ // var envSlice []string
+ // All other, non-zero elements are arguments. The first "nargs" elements
+ // are the arguments. Everything else in the slice is then the environment
+ // of the process.
+ for _, arg := range args[1:] {
+ argStr = string(arg[:])
+ if len(argStr) > 0 {
+ if nargs > 0 {
+ argSlice = append(argSlice, argStr)
+ nargs--
+ continue
+ }
+ break
+ // envSlice = append(envSlice, argStr)
+ }
+ }
+ return argSlice, err
+}
+
+// cmdNameWithContext returns the command name (including spaces) without any arguments
+func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
+ r, err := p.cmdlineSliceWithContext(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(r) == 0 {
+ return "", nil
+ }
+
+ return r[0], err
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ r, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ return strings.Join(r, " "), err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ const tiSize = C.sizeof_struct_proc_taskinfo
+ ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
+
+ _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
+ if err != nil {
+ return 0, err
+ }
+
+ return int32(ti.pti_threadnum), nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ const tiSize = C.sizeof_struct_proc_taskinfo
+ ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
+
+ _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &cpu.TimesStat{
+ CPU: "cpu",
+ User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9,
+ System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9,
+ }
+ return ret, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ const tiSize = C.sizeof_struct_proc_taskinfo
+ ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
+
+ _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &MemoryInfoStat{
+ RSS: uint64(ti.pti_resident_size),
+ VMS: uint64(ti.pti_virtual_size),
+ Swap: uint64(ti.pti_pageins),
+ }
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go
new file mode 100644
index 000000000..bc1d357df
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go
@@ -0,0 +1,127 @@
+//go:build darwin && !cgo
+// +build darwin,!cgo
+
+package process
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn")
+ if err != nil {
+ return "", fmt.Errorf("bad call to lsof: %s", err)
+ }
+ txtFound := 0
+ lines := strings.Split(string(out), "\n")
+ for i := 1; i < len(lines); i++ {
+ if lines[i] == "ftxt" {
+ txtFound++
+ if txtFound == 2 {
+ return lines[i-1][1:], nil
+ }
+ }
+ }
+ return "", fmt.Errorf("missing txt data returned by lsof")
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
+ if err != nil {
+ return "", err
+ }
+ return strings.Join(r[0], " "), err
+}
+
+func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
+ r, err := callPsWithContext(ctx, "command", p.Pid, false, true)
+ if err != nil {
+ return "", err
+ }
+ if len(r) > 0 && len(r[0]) > 0 {
+ return r[0][0], err
+ }
+
+ return "", err
+}
+
+// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each
+// element being an argument. Because of current deficiencies in the way that the command
+// line arguments are found, single arguments that have spaces in the will actually be
+// reported as two separate items. In order to do something better CGO would be needed
+// to use the native darwin functions.
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
+ if err != nil {
+ return nil, err
+ }
+ return r[0], err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false)
+ if err != nil {
+ return 0, err
+ }
+ return int32(len(r)), nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false)
+ if err != nil {
+ return nil, err
+ }
+
+ utime, err := convertCPUTimes(r[0][0])
+ if err != nil {
+ return nil, err
+ }
+ stime, err := convertCPUTimes(r[0][1])
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &cpu.TimesStat{
+ CPU: "cpu",
+ User: utime,
+ System: stime,
+ }
+ return ret, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false)
+ if err != nil {
+ return nil, err
+ }
+ rss, err := strconv.Atoi(r[0][0])
+ if err != nil {
+ return nil, err
+ }
+ vms, err := strconv.Atoi(r[0][1])
+ if err != nil {
+ return nil, err
+ }
+ pagein, err := strconv.Atoi(r[0][2])
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &MemoryInfoStat{
+ RSS: uint64(rss) * 1024,
+ VMS: uint64(vms) * 1024,
+ Swap: uint64(pagein),
+ }
+
+ return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go
new file mode 100644
index 000000000..1a5d0c4b4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go
@@ -0,0 +1,203 @@
+//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9
+// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9
+
+package process
+
+import (
+ "context"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+)
+
+type Signal = syscall.Signal
+
+type MemoryMapsStat struct {
+ Path string `json:"path"`
+ Rss uint64 `json:"rss"`
+ Size uint64 `json:"size"`
+ Pss uint64 `json:"pss"`
+ SharedClean uint64 `json:"sharedClean"`
+ SharedDirty uint64 `json:"sharedDirty"`
+ PrivateClean uint64 `json:"privateClean"`
+ PrivateDirty uint64 `json:"privateDirty"`
+ Referenced uint64 `json:"referenced"`
+ Anonymous uint64 `json:"anonymous"`
+ Swap uint64 `json:"swap"`
+}
+
+type MemoryInfoExStat struct{}
+
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) {
+ return false, common.ErrNotImplementedError
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ return []string{""}, common.ErrNotImplementedError
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ return false, common.ErrNotImplementedError
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error {
+ return common.ErrNotImplementedError
+}
+
+func (p *Process) SuspendWithContext(ctx context.Context) error {
+ return common.ErrNotImplementedError
+}
+
+func (p *Process) ResumeWithContext(ctx context.Context) error {
+ return common.ErrNotImplementedError
+}
+
+func (p *Process) TerminateWithContext(ctx context.Context) error {
+ return common.ErrNotImplementedError
+}
+
+func (p *Process) KillWithContext(ctx context.Context) error {
+ return common.ErrNotImplementedError
+}
+
+func (p *Process) UsernameWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+ return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go
new file mode 100644
index 000000000..779f8126a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go
@@ -0,0 +1,338 @@
+//go:build freebsd
+// +build freebsd
+
+package process
+
+import (
+ "bytes"
+ "context"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ cpu "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ net "github.com/shirou/gopsutil/v3/net"
+ "golang.org/x/sys/unix"
+)
+
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+ var ret []int32
+ procs, err := ProcessesWithContext(ctx)
+ if err != nil {
+ return ret, nil
+ }
+
+ for _, p := range procs {
+ ret = append(ret, p.Pid)
+ }
+
+ return ret, nil
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+
+ return k.Ppid, nil
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+ name := common.IntToString(k.Comm[:])
+
+ if len(name) >= 15 {
+ cmdlineSlice, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(cmdlineSlice) > 0 {
+ extendedName := filepath.Base(cmdlineSlice[0])
+ if strings.HasPrefix(extendedName, p.name) {
+ name = extendedName
+ } else {
+ name = cmdlineSlice[0]
+ }
+ }
+ }
+
+ return name, nil
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid}
+ buf, _, err := common.CallSyscall(mib)
+ if err != nil {
+ return "", err
+ }
+ ret := strings.FieldsFunc(string(buf), func(r rune) bool {
+ if r == '\u0000' {
+ return true
+ }
+ return false
+ })
+
+ return strings.Join(ret, " "), nil
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid}
+ buf, _, err := common.CallSyscall(mib)
+ if err != nil {
+ return nil, err
+ }
+ if len(buf) == 0 {
+ return nil, nil
+ }
+ if buf[len(buf)-1] == 0 {
+ buf = buf[:len(buf)-1]
+ }
+ parts := bytes.Split(buf, []byte{0})
+ var strParts []string
+ for _, p := range parts {
+ strParts = append(strParts, string(p))
+ }
+
+ return strParts, nil
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+ return int64(k.Start.Sec)*1000 + int64(k.Start.Usec)/1000, nil
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return []string{""}, err
+ }
+ var s string
+ switch k.Stat {
+ case SIDL:
+ s = Idle
+ case SRUN:
+ s = Running
+ case SSLEEP:
+ s = Sleep
+ case SSTOP:
+ s = Stop
+ case SZOMB:
+ s = Zombie
+ case SWAIT:
+ s = Wait
+ case SLOCK:
+ s = Lock
+ }
+
+ return []string{s}, nil
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details
+ pid := p.Pid
+ out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid)))
+ if err != nil {
+ return false, err
+ }
+ return strings.IndexByte(string(out), '+') != -1, nil
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ uids := make([]int32, 0, 3)
+
+ uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid))
+
+ return uids, nil
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ gids := make([]int32, 0, 3)
+ gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid))
+
+ return gids, nil
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ groups := make([]int32, k.Ngroups)
+ for i := int16(0); i < k.Ngroups; i++ {
+ groups[i] = int32(k.Groups[i])
+ }
+
+ return groups, nil
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+
+ ttyNr := uint64(k.Tdev)
+
+ termmap, err := getTerminalMap()
+ if err != nil {
+ return "", err
+ }
+
+ return termmap[ttyNr], nil
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+ return int32(k.Nice), nil
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ return &IOCountersStat{
+ ReadCount: uint64(k.Rusage.Inblock),
+ WriteCount: uint64(k.Rusage.Oublock),
+ }, nil
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+
+ return k.Numthreads, nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ return &cpu.TimesStat{
+ CPU: "cpu",
+ User: float64(k.Rusage.Utime.Sec) + float64(k.Rusage.Utime.Usec)/1000000,
+ System: float64(k.Rusage.Stime.Sec) + float64(k.Rusage.Stime.Usec)/1000000,
+ }, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ v, err := unix.Sysctl("vm.stats.vm.v_page_size")
+ if err != nil {
+ return nil, err
+ }
+ pageSize := common.LittleEndian.Uint16([]byte(v))
+
+ return &MemoryInfoStat{
+ RSS: uint64(k.Rssize) * uint64(pageSize),
+ VMS: uint64(k.Size),
+ }, nil
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+ pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]*Process, 0, len(pids))
+ for _, pid := range pids {
+ np, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, np)
+ }
+ return ret, nil
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ results := []*Process{}
+
+ mib := []int32{CTLKern, KernProc, KernProcProc, 0}
+ buf, length, err := common.CallSyscall(mib)
+ if err != nil {
+ return results, err
+ }
+
+ // get kinfo_proc size
+ count := int(length / uint64(sizeOfKinfoProc))
+
+ // parse buf to procs
+ for i := 0; i < count; i++ {
+ b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc]
+ k, err := parseKinfoProc(b)
+ if err != nil {
+ continue
+ }
+ p, err := NewProcessWithContext(ctx, int32(k.Pid))
+ if err != nil {
+ continue
+ }
+
+ results = append(results, p)
+ }
+
+ return results, nil
+}
+
+func (p *Process) getKProc() (*KinfoProc, error) {
+ mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid}
+
+ buf, length, err := common.CallSyscall(mib)
+ if err != nil {
+ return nil, err
+ }
+ if length != sizeOfKinfoProc {
+ return nil, err
+ }
+
+ k, err := parseKinfoProc(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &k, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go
new file mode 100644
index 000000000..08ab333b4
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go
@@ -0,0 +1,192 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 14
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 7
+)
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x488
+ sizeOfKinfoProc = 0x300
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SWAIT = 6
+ SLOCK = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur int64
+ Max int64
+}
+
+type KinfoProc struct {
+ Structsize int32
+ Layout int32
+ Args int32 /* pargs */
+ Paddr int32 /* proc */
+ Addr int32 /* user */
+ Tracep int32 /* vnode */
+ Textvp int32 /* vnode */
+ Fd int32 /* filedesc */
+ Vmspace int32 /* vmspace */
+ Wchan int32
+ Pid int32
+ Ppid int32
+ Pgid int32
+ Tpgid int32
+ Sid int32
+ Tsid int32
+ Jobc int16
+ Spare_short1 int16
+ Tdev uint32
+ Siglist [16]byte /* sigset */
+ Sigmask [16]byte /* sigset */
+ Sigignore [16]byte /* sigset */
+ Sigcatch [16]byte /* sigset */
+ Uid uint32
+ Ruid uint32
+ Svuid uint32
+ Rgid uint32
+ Svgid uint32
+ Ngroups int16
+ Spare_short2 int16
+ Groups [16]uint32
+ Size uint32
+ Rssize int32
+ Swrss int32
+ Tsize int32
+ Dsize int32
+ Ssize int32
+ Xstat uint16
+ Acflag uint16
+ Pctcpu uint32
+ Estcpu uint32
+ Slptime uint32
+ Swtime uint32
+ Cow uint32
+ Runtime uint64
+ Start Timeval
+ Childtime Timeval
+ Flag int32
+ Kiflag int32
+ Traceflag int32
+ Stat int8
+ Nice int8
+ Lock int8
+ Rqindex int8
+ Oncpu uint8
+ Lastcpu uint8
+ Tdname [17]int8
+ Wmesg [9]int8
+ Login [18]int8
+ Lockname [9]int8
+ Comm [20]int8
+ Emul [17]int8
+ Loginclass [18]int8
+ Sparestrings [50]int8
+ Spareints [7]int32
+ Flag2 int32
+ Fibnum int32
+ Cr_flags uint32
+ Jid int32
+ Numthreads int32
+ Tid int32
+ Pri Priority
+ Rusage Rusage
+ Rusage_ch Rusage
+ Pcb int32 /* pcb */
+ Kstack int32
+ Udata int32
+ Tdaddr int32 /* thread */
+ Spareptrs [6]int32
+ Sparelongs [12]int32
+ Sflag int32
+ Tdflags int32
+}
+
+type Priority struct {
+ Class uint8
+ Level uint8
+ Native uint8
+ User uint8
+}
+
+type KinfoVmentry struct {
+ Structsize int32
+ Type int32
+ Start uint64
+ End uint64
+ Offset uint64
+ Vn_fileid uint64
+ Vn_fsid uint32
+ Flags int32
+ Resident int32
+ Private_resident int32
+ Protection int32
+ Ref_count int32
+ Shadow_count int32
+ Vn_type int32
+ Vn_size uint64
+ Vn_rdev uint32
+ Vn_mode uint16
+ Status uint16
+ X_kve_ispare [12]int32
+ Path [1024]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go
new file mode 100644
index 000000000..560e627d2
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go
@@ -0,0 +1,192 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 14
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 7
+)
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x488
+ sizeOfKinfoProc = 0x440
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SWAIT = 6
+ SLOCK = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur int64
+ Max int64
+}
+
+type KinfoProc struct {
+ Structsize int32
+ Layout int32
+ Args int64 /* pargs */
+ Paddr int64 /* proc */
+ Addr int64 /* user */
+ Tracep int64 /* vnode */
+ Textvp int64 /* vnode */
+ Fd int64 /* filedesc */
+ Vmspace int64 /* vmspace */
+ Wchan int64
+ Pid int32
+ Ppid int32
+ Pgid int32
+ Tpgid int32
+ Sid int32
+ Tsid int32
+ Jobc int16
+ Spare_short1 int16
+ Tdev uint32
+ Siglist [16]byte /* sigset */
+ Sigmask [16]byte /* sigset */
+ Sigignore [16]byte /* sigset */
+ Sigcatch [16]byte /* sigset */
+ Uid uint32
+ Ruid uint32
+ Svuid uint32
+ Rgid uint32
+ Svgid uint32
+ Ngroups int16
+ Spare_short2 int16
+ Groups [16]uint32
+ Size uint64
+ Rssize int64
+ Swrss int64
+ Tsize int64
+ Dsize int64
+ Ssize int64
+ Xstat uint16
+ Acflag uint16
+ Pctcpu uint32
+ Estcpu uint32
+ Slptime uint32
+ Swtime uint32
+ Cow uint32
+ Runtime uint64
+ Start Timeval
+ Childtime Timeval
+ Flag int64
+ Kiflag int64
+ Traceflag int32
+ Stat int8
+ Nice int8
+ Lock int8
+ Rqindex int8
+ Oncpu uint8
+ Lastcpu uint8
+ Tdname [17]int8
+ Wmesg [9]int8
+ Login [18]int8
+ Lockname [9]int8
+ Comm [20]int8
+ Emul [17]int8
+ Loginclass [18]int8
+ Sparestrings [50]int8
+ Spareints [7]int32
+ Flag2 int32
+ Fibnum int32
+ Cr_flags uint32
+ Jid int32
+ Numthreads int32
+ Tid int32
+ Pri Priority
+ Rusage Rusage
+ Rusage_ch Rusage
+ Pcb int64 /* pcb */
+ Kstack int64
+ Udata int64
+ Tdaddr int64 /* thread */
+ Spareptrs [6]int64
+ Sparelongs [12]int64
+ Sflag int64
+ Tdflags int64
+}
+
+type Priority struct {
+ Class uint8
+ Level uint8
+ Native uint8
+ User uint8
+}
+
+type KinfoVmentry struct {
+ Structsize int32
+ Type int32
+ Start uint64
+ End uint64
+ Offset uint64
+ Vn_fileid uint64
+ Vn_fsid uint32
+ Flags int32
+ Resident int32
+ Private_resident int32
+ Protection int32
+ Ref_count int32
+ Shadow_count int32
+ Vn_type int32
+ Vn_size uint64
+ Vn_rdev uint32
+ Vn_mode uint16
+ Status uint16
+ X_kve_ispare [12]int32
+ Path [1024]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go
new file mode 100644
index 000000000..81ae0b9a8
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go
@@ -0,0 +1,192 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 14
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 7
+)
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x488
+ sizeOfKinfoProc = 0x440
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SWAIT = 6
+ SLOCK = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur int32
+ Max int32
+}
+
+type KinfoProc struct {
+ Structsize int32
+ Layout int32
+ Args int32 /* pargs */
+ Paddr int32 /* proc */
+ Addr int32 /* user */
+ Tracep int32 /* vnode */
+ Textvp int32 /* vnode */
+ Fd int32 /* filedesc */
+ Vmspace int32 /* vmspace */
+ Wchan int32
+ Pid int32
+ Ppid int32
+ Pgid int32
+ Tpgid int32
+ Sid int32
+ Tsid int32
+ Jobc int16
+ Spare_short1 int16
+ Tdev uint32
+ Siglist [16]byte /* sigset */
+ Sigmask [16]byte /* sigset */
+ Sigignore [16]byte /* sigset */
+ Sigcatch [16]byte /* sigset */
+ Uid uint32
+ Ruid uint32
+ Svuid uint32
+ Rgid uint32
+ Svgid uint32
+ Ngroups int16
+ Spare_short2 int16
+ Groups [16]uint32
+ Size uint32
+ Rssize int32
+ Swrss int32
+ Tsize int32
+ Dsize int32
+ Ssize int32
+ Xstat uint16
+ Acflag uint16
+ Pctcpu uint32
+ Estcpu uint32
+ Slptime uint32
+ Swtime uint32
+ Cow uint32
+ Runtime uint64
+ Start Timeval
+ Childtime Timeval
+ Flag int32
+ Kiflag int32
+ Traceflag int32
+ Stat int8
+ Nice int8
+ Lock int8
+ Rqindex int8
+ Oncpu uint8
+ Lastcpu uint8
+ Tdname [17]int8
+ Wmesg [9]int8
+ Login [18]int8
+ Lockname [9]int8
+ Comm [20]int8
+ Emul [17]int8
+ Loginclass [18]int8
+ Sparestrings [50]int8
+ Spareints [4]int32
+ Flag2 int32
+ Fibnum int32
+ Cr_flags uint32
+ Jid int32
+ Numthreads int32
+ Tid int32
+ Pri Priority
+ Rusage Rusage
+ Rusage_ch Rusage
+ Pcb int32 /* pcb */
+ Kstack int32
+ Udata int32
+ Tdaddr int32 /* thread */
+ Spareptrs [6]int64
+ Sparelongs [12]int64
+ Sflag int64
+ Tdflags int64
+}
+
+type Priority struct {
+ Class uint8
+ Level uint8
+ Native uint8
+ User uint8
+}
+
+type KinfoVmentry struct {
+ Structsize int32
+ Type int32
+ Start uint64
+ End uint64
+ Offset uint64
+ Vn_fileid uint64
+ Vn_fsid uint32
+ Flags int32
+ Resident int32
+ Private_resident int32
+ Protection int32
+ Ref_count int32
+ Shadow_count int32
+ Vn_type int32
+ Vn_size uint64
+ Vn_rdev uint32
+ Vn_mode uint16
+ Status uint16
+ X_kve_ispare [12]int32
+ Path [1024]int8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go
new file mode 100644
index 000000000..effd470a0
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go
@@ -0,0 +1,202 @@
+//go:build freebsd && arm64
+// +build freebsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs process/types_freebsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 14
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 7
+)
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x488
+ sizeOfKinfoProc = 0x440
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SWAIT = 6
+ SLOCK = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur int64
+ Max int64
+}
+
+type KinfoProc struct {
+ Structsize int32
+ Layout int32
+ Args *int64 /* pargs */
+ Paddr *int64 /* proc */
+ Addr *int64 /* user */
+ Tracep *int64 /* vnode */
+ Textvp *int64 /* vnode */
+ Fd *int64 /* filedesc */
+ Vmspace *int64 /* vmspace */
+ Wchan *byte
+ Pid int32
+ Ppid int32
+ Pgid int32
+ Tpgid int32
+ Sid int32
+ Tsid int32
+ Jobc int16
+ Spare_short1 int16
+ Tdev_freebsd11 uint32
+ Siglist [16]byte /* sigset */
+ Sigmask [16]byte /* sigset */
+ Sigignore [16]byte /* sigset */
+ Sigcatch [16]byte /* sigset */
+ Uid uint32
+ Ruid uint32
+ Svuid uint32
+ Rgid uint32
+ Svgid uint32
+ Ngroups int16
+ Spare_short2 int16
+ Groups [16]uint32
+ Size uint64
+ Rssize int64
+ Swrss int64
+ Tsize int64
+ Dsize int64
+ Ssize int64
+ Xstat uint16
+ Acflag uint16
+ Pctcpu uint32
+ Estcpu uint32
+ Slptime uint32
+ Swtime uint32
+ Cow uint32
+ Runtime uint64
+ Start Timeval
+ Childtime Timeval
+ Flag int64
+ Kiflag int64
+ Traceflag int32
+ Stat uint8
+ Nice int8
+ Lock uint8
+ Rqindex uint8
+ Oncpu_old uint8
+ Lastcpu_old uint8
+ Tdname [17]uint8
+ Wmesg [9]uint8
+ Login [18]uint8
+ Lockname [9]uint8
+ Comm [20]int8
+ Emul [17]uint8
+ Loginclass [18]uint8
+ Moretdname [4]uint8
+ Sparestrings [46]uint8
+ Spareints [2]int32
+ Tdev uint64
+ Oncpu int32
+ Lastcpu int32
+ Tracer int32
+ Flag2 int32
+ Fibnum int32
+ Cr_flags uint32
+ Jid int32
+ Numthreads int32
+ Tid int32
+ Pri Priority
+ Rusage Rusage
+ Rusage_ch Rusage
+ Pcb *int64 /* pcb */
+ Kstack *byte
+ Udata *byte
+ Tdaddr *int64 /* thread */
+ Spareptrs [6]*byte
+ Sparelongs [12]int64
+ Sflag int64
+ Tdflags int64
+}
+
+type Priority struct {
+ Class uint8
+ Level uint8
+ Native uint8
+ User uint8
+}
+
+type KinfoVmentry struct {
+ Structsize int32
+ Type int32
+ Start uint64
+ End uint64
+ Offset uint64
+ Vn_fileid uint64
+ Vn_fsid_freebsd11 uint32
+ Flags int32
+ Resident int32
+ Private_resident int32
+ Protection int32
+ Ref_count int32
+ Shadow_count int32
+ Vn_type int32
+ Vn_size uint64
+ Vn_rdev_freebsd11 uint32
+ Vn_mode uint16
+ Status uint16
+ Vn_fsid uint64
+ Vn_rdev uint64
+ X_kve_ispare [8]int32
+ Path [1024]uint8
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go
new file mode 100644
index 000000000..d5b5bc329
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go
@@ -0,0 +1,1189 @@
+//go:build linux
+// +build linux
+
+package process
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+ "github.com/tklauser/go-sysconf"
+ "golang.org/x/sys/unix"
+)
+
+var pageSize = uint64(os.Getpagesize())
+
+const prioProcess = 0 // linux/resource.h
+
+var clockTicks = 100 // default value
+
+func init() {
+ clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
+ // ignore errors
+ if err == nil {
+ clockTicks = int(clkTck)
+ }
+}
+
+// MemoryInfoExStat is different between OSes
+type MemoryInfoExStat struct {
+ RSS uint64 `json:"rss"` // bytes
+ VMS uint64 `json:"vms"` // bytes
+ Shared uint64 `json:"shared"` // bytes
+ Text uint64 `json:"text"` // bytes
+ Lib uint64 `json:"lib"` // bytes
+ Data uint64 `json:"data"` // bytes
+ Dirty uint64 `json:"dirty"` // bytes
+}
+
+func (m MemoryInfoExStat) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
+
+type MemoryMapsStat struct {
+ Path string `json:"path"`
+ Rss uint64 `json:"rss"`
+ Size uint64 `json:"size"`
+ Pss uint64 `json:"pss"`
+ SharedClean uint64 `json:"sharedClean"`
+ SharedDirty uint64 `json:"sharedDirty"`
+ PrivateClean uint64 `json:"privateClean"`
+ PrivateDirty uint64 `json:"privateDirty"`
+ Referenced uint64 `json:"referenced"`
+ Anonymous uint64 `json:"anonymous"`
+ Swap uint64 `json:"swap"`
+}
+
+// String returns JSON value of the process.
+func (m MemoryMapsStat) String() string {
+ s, _ := json.Marshal(m)
+ return string(s)
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+ _, ppid, _, _, _, _, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return -1, err
+ }
+ return ppid, nil
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+ if p.name == "" {
+ if err := p.fillNameWithContext(ctx); err != nil {
+ return "", err
+ }
+ }
+ return p.name, nil
+}
+
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+ if p.tgid == 0 {
+ if err := p.fillFromStatusWithContext(ctx); err != nil {
+ return 0, err
+ }
+ }
+ return p.tgid, nil
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ return p.fillFromExeWithContext()
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ return p.fillFromCmdlineWithContext(ctx)
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ return p.fillSliceFromCmdlineWithContext(ctx)
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+ _, _, _, createTime, _, _, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return createTime, nil
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ return p.fillFromCwdWithContext()
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return []string{""}, err
+ }
+ return []string{p.status}, nil
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details
+ pid := p.Pid
+ statPath := common.HostProc(strconv.Itoa(int(pid)), "stat")
+ contents, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return false, err
+ }
+ fields := strings.Fields(string(contents))
+ if len(fields) < 8 {
+ return false, fmt.Errorf("insufficient data in %s", statPath)
+ }
+ pgid := fields[4]
+ tpgid := fields[7]
+ return pgid == tpgid, nil
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return []int32{}, err
+ }
+ return p.uids, nil
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return []int32{}, err
+ }
+ return p.gids, nil
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return []int32{}, err
+ }
+ return p.groups, nil
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+ t, _, _, _, _, _, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ termmap, err := getTerminalMap()
+ if err != nil {
+ return "", err
+ }
+ terminal := termmap[t]
+ return terminal, nil
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+ _, _, _, _, _, nice, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return nice, nil
+}
+
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+ return p.RlimitUsageWithContext(ctx, false)
+}
+
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+ rlimits, err := p.fillFromLimitsWithContext()
+ if !gatherUsed || err != nil {
+ return rlimits, err
+ }
+
+ _, _, _, _, rtprio, nice, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if err := p.fillFromStatusWithContext(ctx); err != nil {
+ return nil, err
+ }
+
+ for i := range rlimits {
+ rs := &rlimits[i]
+ switch rs.Resource {
+ case RLIMIT_CPU:
+ times, err := p.TimesWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ rs.Used = uint64(times.User + times.System)
+ case RLIMIT_DATA:
+ rs.Used = uint64(p.memInfo.Data)
+ case RLIMIT_STACK:
+ rs.Used = uint64(p.memInfo.Stack)
+ case RLIMIT_RSS:
+ rs.Used = uint64(p.memInfo.RSS)
+ case RLIMIT_NOFILE:
+ n, err := p.NumFDsWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ rs.Used = uint64(n)
+ case RLIMIT_MEMLOCK:
+ rs.Used = uint64(p.memInfo.Locked)
+ case RLIMIT_AS:
+ rs.Used = uint64(p.memInfo.VMS)
+ case RLIMIT_LOCKS:
+ // TODO we can get the used value from /proc/$pid/locks. But linux doesn't enforce it, so not a high priority.
+ case RLIMIT_SIGPENDING:
+ rs.Used = p.sigInfo.PendingProcess
+ case RLIMIT_NICE:
+ // The rlimit for nice is a little unusual, in that 0 means the niceness cannot be decreased beyond the current value, but it can be increased.
+ // So effectively: if rs.Soft == 0 { rs.Soft = rs.Used }
+ rs.Used = uint64(nice)
+ case RLIMIT_RTPRIO:
+ rs.Used = uint64(rtprio)
+ }
+ }
+
+ return rlimits, err
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+ return p.fillFromIOWithContext()
+}
+
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return p.numCtxSwitches, nil
+}
+
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+ _, fnames, err := p.fillFromfdListWithContext(ctx)
+ return int32(len(fnames)), err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ err := p.fillFromStatusWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ return p.numThreads, nil
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+ ret := make(map[int32]*cpu.TimesStat)
+ taskPath := common.HostProc(strconv.Itoa(int(p.Pid)), "task")
+
+ tids, err := readPidsFromDir(taskPath)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, tid := range tids {
+ _, _, cpuTimes, _, _, _, _, err := p.fillFromTIDStatWithContext(ctx, tid)
+ if err != nil {
+ return nil, err
+ }
+ ret[tid] = cpuTimes
+ }
+
+ return ret, nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ _, _, cpuTimes, _, _, _, _, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return cpuTimes, nil
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ meminfo, _, err := p.fillFromStatmWithContext()
+ if err != nil {
+ return nil, err
+ }
+ return meminfo, nil
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+ _, memInfoEx, err := p.fillFromStatmWithContext()
+ if err != nil {
+ return nil, err
+ }
+ return memInfoEx, nil
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+ _, _, _, _, _, _, pageFaults, err := p.fillFromStatWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return pageFaults, nil
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+ pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ if len(pids) == 0 {
+ return nil, ErrorNoChildren
+ }
+ ret := make([]*Process, 0, len(pids))
+ for _, pid := range pids {
+ np, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, np)
+ }
+ return ret, nil
+}
+
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+ _, ofs, err := p.fillFromfdWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]OpenFilesStat, len(ofs))
+ for i, o := range ofs {
+ ret[i] = *o
+ }
+
+ return ret, nil
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+ return net.ConnectionsPidWithContext(ctx, "all", p.Pid)
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+ return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max)
+}
+
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+ pid := p.Pid
+ var ret []MemoryMapsStat
+ smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps")
+ if grouped {
+ ret = make([]MemoryMapsStat, 1)
+ // If smaps_rollup exists (require kernel >= 4.15), then we will use it
+ // for pre-summed memory information for a process.
+ smapsRollupPath := common.HostProc(strconv.Itoa(int(pid)), "smaps_rollup")
+ if _, err := os.Stat(smapsRollupPath); !os.IsNotExist(err) {
+ smapsPath = smapsRollupPath
+ }
+ }
+ contents, err := ioutil.ReadFile(smapsPath)
+ if err != nil {
+ return nil, err
+ }
+ lines := strings.Split(string(contents), "\n")
+
+ // function of parsing a block
+ getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) {
+ m := MemoryMapsStat{}
+ m.Path = firstLine[len(firstLine)-1]
+
+ for _, line := range block {
+ if strings.Contains(line, "VmFlags") {
+ continue
+ }
+ field := strings.Split(line, ":")
+ if len(field) < 2 {
+ continue
+ }
+ v := strings.Trim(field[1], "kB") // remove last "kB"
+ v = strings.TrimSpace(v)
+ t, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ switch field[0] {
+ case "Size":
+ m.Size = t
+ case "Rss":
+ m.Rss = t
+ case "Pss":
+ m.Pss = t
+ case "Shared_Clean":
+ m.SharedClean = t
+ case "Shared_Dirty":
+ m.SharedDirty = t
+ case "Private_Clean":
+ m.PrivateClean = t
+ case "Private_Dirty":
+ m.PrivateDirty = t
+ case "Referenced":
+ m.Referenced = t
+ case "Anonymous":
+ m.Anonymous = t
+ case "Swap":
+ m.Swap = t
+ }
+ }
+ return m, nil
+ }
+
+ var firstLine []string
+ blocks := make([]string, 0, 16)
+
+ for i, line := range lines {
+ fields := strings.Fields(line)
+ if (len(fields) > 0 && !strings.HasSuffix(fields[0], ":")) || i == len(lines)-1 {
+ // new block section
+ if len(firstLine) > 0 && len(blocks) > 0 {
+ g, err := getBlock(firstLine, blocks)
+ if err != nil {
+ return &ret, err
+ }
+ if grouped {
+ ret[0].Size += g.Size
+ ret[0].Rss += g.Rss
+ ret[0].Pss += g.Pss
+ ret[0].SharedClean += g.SharedClean
+ ret[0].SharedDirty += g.SharedDirty
+ ret[0].PrivateClean += g.PrivateClean
+ ret[0].PrivateDirty += g.PrivateDirty
+ ret[0].Referenced += g.Referenced
+ ret[0].Anonymous += g.Anonymous
+ ret[0].Swap += g.Swap
+ } else {
+ ret = append(ret, g)
+ }
+ }
+ // starts new block
+ blocks = make([]string, 0, 16)
+ firstLine = fields
+ } else {
+ blocks = append(blocks, line)
+ }
+ }
+
+ return &ret, nil
+}
+
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+ environPath := common.HostProc(strconv.Itoa(int(p.Pid)), "environ")
+
+ environContent, err := ioutil.ReadFile(environPath)
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Split(string(environContent), "\000"), nil
+}
+
+/**
+** Internal functions
+**/
+
+func limitToUint(val string) (uint64, error) {
+ if val == "unlimited" {
+ return math.MaxUint64, nil
+ }
+ res, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return res, nil
+}
+
+// Get num_fds from /proc/(pid)/limits
+func (p *Process) fillFromLimitsWithContext() ([]RlimitStat, error) {
+ pid := p.Pid
+ limitsFile := common.HostProc(strconv.Itoa(int(pid)), "limits")
+ d, err := os.Open(limitsFile)
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ var limitStats []RlimitStat
+
+ limitsScanner := bufio.NewScanner(d)
+ for limitsScanner.Scan() {
+ var statItem RlimitStat
+
+ str := strings.Fields(limitsScanner.Text())
+
+ // Remove the header line
+ if strings.Contains(str[len(str)-1], "Units") {
+ continue
+ }
+
+ // Assert that last item is a Hard limit
+ statItem.Hard, err = limitToUint(str[len(str)-1])
+ if err != nil {
+ // On error remove last item and try once again since it can be unit or header line
+ str = str[:len(str)-1]
+ statItem.Hard, err = limitToUint(str[len(str)-1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Remove last item from string
+ str = str[:len(str)-1]
+
+ // Now last item is a Soft limit
+ statItem.Soft, err = limitToUint(str[len(str)-1])
+ if err != nil {
+ return nil, err
+ }
+ // Remove last item from string
+ str = str[:len(str)-1]
+
+ // The rest is a stats name
+ resourceName := strings.Join(str, " ")
+ switch resourceName {
+ case "Max cpu time":
+ statItem.Resource = RLIMIT_CPU
+ case "Max file size":
+ statItem.Resource = RLIMIT_FSIZE
+ case "Max data size":
+ statItem.Resource = RLIMIT_DATA
+ case "Max stack size":
+ statItem.Resource = RLIMIT_STACK
+ case "Max core file size":
+ statItem.Resource = RLIMIT_CORE
+ case "Max resident set":
+ statItem.Resource = RLIMIT_RSS
+ case "Max processes":
+ statItem.Resource = RLIMIT_NPROC
+ case "Max open files":
+ statItem.Resource = RLIMIT_NOFILE
+ case "Max locked memory":
+ statItem.Resource = RLIMIT_MEMLOCK
+ case "Max address space":
+ statItem.Resource = RLIMIT_AS
+ case "Max file locks":
+ statItem.Resource = RLIMIT_LOCKS
+ case "Max pending signals":
+ statItem.Resource = RLIMIT_SIGPENDING
+ case "Max msgqueue size":
+ statItem.Resource = RLIMIT_MSGQUEUE
+ case "Max nice priority":
+ statItem.Resource = RLIMIT_NICE
+ case "Max realtime priority":
+ statItem.Resource = RLIMIT_RTPRIO
+ case "Max realtime timeout":
+ statItem.Resource = RLIMIT_RTTIME
+ default:
+ continue
+ }
+
+ limitStats = append(limitStats, statItem)
+ }
+
+ if err := limitsScanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return limitStats, nil
+}
+
+// Get list of /proc/(pid)/fd files
+func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) {
+ pid := p.Pid
+ statPath := common.HostProc(strconv.Itoa(int(pid)), "fd")
+ d, err := os.Open(statPath)
+ if err != nil {
+ return statPath, []string{}, err
+ }
+ defer d.Close()
+ fnames, err := d.Readdirnames(-1)
+ return statPath, fnames, err
+}
+
+// Get num_fds from /proc/(pid)/fd
+func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFilesStat, error) {
+ statPath, fnames, err := p.fillFromfdListWithContext(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ numFDs := int32(len(fnames))
+
+ var openfiles []*OpenFilesStat
+ for _, fd := range fnames {
+ fpath := filepath.Join(statPath, fd)
+ filepath, err := os.Readlink(fpath)
+ if err != nil {
+ continue
+ }
+ t, err := strconv.ParseUint(fd, 10, 64)
+ if err != nil {
+ return numFDs, openfiles, err
+ }
+ o := &OpenFilesStat{
+ Path: filepath,
+ Fd: t,
+ }
+ openfiles = append(openfiles, o)
+ }
+
+ return numFDs, openfiles, nil
+}
+
+// Get cwd from /proc/(pid)/cwd
+func (p *Process) fillFromCwdWithContext() (string, error) {
+ pid := p.Pid
+ cwdPath := common.HostProc(strconv.Itoa(int(pid)), "cwd")
+ cwd, err := os.Readlink(cwdPath)
+ if err != nil {
+ return "", err
+ }
+ return string(cwd), nil
+}
+
+// Get exe from /proc/(pid)/exe
+func (p *Process) fillFromExeWithContext() (string, error) {
+ pid := p.Pid
+ exePath := common.HostProc(strconv.Itoa(int(pid)), "exe")
+ exe, err := os.Readlink(exePath)
+ if err != nil {
+ return "", err
+ }
+ return string(exe), nil
+}
+
+// Get cmdline from /proc/(pid)/cmdline
+func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) {
+ pid := p.Pid
+ cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
+ cmdline, err := ioutil.ReadFile(cmdPath)
+ if err != nil {
+ return "", err
+ }
+ ret := strings.FieldsFunc(string(cmdline), func(r rune) bool {
+ return r == '\u0000'
+ })
+
+ return strings.Join(ret, " "), nil
+}
+
+func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) {
+ pid := p.Pid
+ cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
+ cmdline, err := ioutil.ReadFile(cmdPath)
+ if err != nil {
+ return nil, err
+ }
+ if len(cmdline) == 0 {
+ return nil, nil
+ }
+
+ cmdline = bytes.TrimRight(cmdline, "\x00")
+
+ parts := bytes.Split(cmdline, []byte{0})
+ var strParts []string
+ for _, p := range parts {
+ strParts = append(strParts, string(p))
+ }
+
+ return strParts, nil
+}
+
+// Get IO status from /proc/(pid)/io
+func (p *Process) fillFromIOWithContext() (*IOCountersStat, error) {
+ pid := p.Pid
+ ioPath := common.HostProc(strconv.Itoa(int(pid)), "io")
+ ioline, err := ioutil.ReadFile(ioPath)
+ if err != nil {
+ return nil, err
+ }
+ lines := strings.Split(string(ioline), "\n")
+ ret := &IOCountersStat{}
+
+ for _, line := range lines {
+ field := strings.Fields(line)
+ if len(field) < 2 {
+ continue
+ }
+ t, err := strconv.ParseUint(field[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ param := strings.TrimSuffix(field[0], ":")
+ switch param {
+ case "syscr":
+ ret.ReadCount = t
+ case "syscw":
+ ret.WriteCount = t
+ case "read_bytes":
+ ret.ReadBytes = t
+ case "write_bytes":
+ ret.WriteBytes = t
+ }
+ }
+
+ return ret, nil
+}
+
+// Get memory info from /proc/(pid)/statm
+func (p *Process) fillFromStatmWithContext() (*MemoryInfoStat, *MemoryInfoExStat, error) {
+ pid := p.Pid
+ memPath := common.HostProc(strconv.Itoa(int(pid)), "statm")
+ contents, err := ioutil.ReadFile(memPath)
+ if err != nil {
+ return nil, nil, err
+ }
+ fields := strings.Split(string(contents), " ")
+
+ vms, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ rss, err := strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ memInfo := &MemoryInfoStat{
+ RSS: rss * pageSize,
+ VMS: vms * pageSize,
+ }
+
+ shared, err := strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ text, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ lib, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ dirty, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ memInfoEx := &MemoryInfoExStat{
+ RSS: rss * pageSize,
+ VMS: vms * pageSize,
+ Shared: shared * pageSize,
+ Text: text * pageSize,
+ Lib: lib * pageSize,
+ Dirty: dirty * pageSize,
+ }
+
+ return memInfo, memInfoEx, nil
+}
+
+// Get name from /proc/(pid)/comm or /proc/(pid)/status
+func (p *Process) fillNameWithContext(ctx context.Context) error {
+ err := p.fillFromCommWithContext()
+ if err == nil && p.name != "" && len(p.name) < 15 {
+ return nil
+ }
+ return p.fillFromStatusWithContext(ctx)
+}
+
+// Get name from /proc/(pid)/comm
+func (p *Process) fillFromCommWithContext() error {
+ pid := p.Pid
+ statPath := common.HostProc(strconv.Itoa(int(pid)), "comm")
+ contents, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return err
+ }
+
+ p.name = strings.TrimSuffix(string(contents), "\n")
+ return nil
+}
+
+// Get various status from /proc/(pid)/status
+func (p *Process) fillFromStatus() error {
+ return p.fillFromStatusWithContext(context.Background())
+}
+
+func (p *Process) fillFromStatusWithContext(ctx context.Context) error {
+ pid := p.Pid
+ statPath := common.HostProc(strconv.Itoa(int(pid)), "status")
+ contents, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return err
+ }
+ lines := strings.Split(string(contents), "\n")
+ p.numCtxSwitches = &NumCtxSwitchesStat{}
+ p.memInfo = &MemoryInfoStat{}
+ p.sigInfo = &SignalInfoStat{}
+ for _, line := range lines {
+ tabParts := strings.SplitN(line, "\t", 2)
+ if len(tabParts) < 2 {
+ continue
+ }
+ value := tabParts[1]
+ switch strings.TrimRight(tabParts[0], ":") {
+ case "Name":
+ p.name = strings.Trim(value, " \t")
+ if len(p.name) >= 15 {
+ cmdlineSlice, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return err
+ }
+ if len(cmdlineSlice) > 0 {
+ extendedName := filepath.Base(cmdlineSlice[0])
+ if strings.HasPrefix(extendedName, p.name) {
+ p.name = extendedName
+ } else {
+ p.name = cmdlineSlice[0]
+ }
+ }
+ }
+ // Ensure we have a copy and not reference into slice
+ p.name = string([]byte(p.name))
+ case "State":
+ p.status = convertStatusChar(value[0:1])
+ // Ensure we have a copy and not reference into slice
+ p.status = string([]byte(p.status))
+ case "PPid", "Ppid":
+ pval, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.parent = int32(pval)
+ case "Tgid":
+ pval, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.tgid = int32(pval)
+ case "Uid":
+ p.uids = make([]int32, 0, 4)
+ for _, i := range strings.Split(value, "\t") {
+ v, err := strconv.ParseInt(i, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.uids = append(p.uids, int32(v))
+ }
+ case "Gid":
+ p.gids = make([]int32, 0, 4)
+ for _, i := range strings.Split(value, "\t") {
+ v, err := strconv.ParseInt(i, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.gids = append(p.gids, int32(v))
+ }
+ case "Groups":
+ groups := strings.Fields(value)
+ p.groups = make([]int32, 0, len(groups))
+ for _, i := range groups {
+ v, err := strconv.ParseInt(i, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.groups = append(p.groups, int32(v))
+ }
+ case "Threads":
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ p.numThreads = int32(v)
+ case "voluntary_ctxt_switches":
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.numCtxSwitches.Voluntary = v
+ case "nonvoluntary_ctxt_switches":
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.numCtxSwitches.Involuntary = v
+ case "VmRSS":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.RSS = v * 1024
+ case "VmSize":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.VMS = v * 1024
+ case "VmSwap":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.Swap = v * 1024
+ case "VmHWM":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.HWM = v * 1024
+ case "VmData":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.Data = v * 1024
+ case "VmStk":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.Stack = v * 1024
+ case "VmLck":
+ value := strings.Trim(value, " kB") // remove last "kB"
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ p.memInfo.Locked = v * 1024
+ case "SigPnd":
+ if len(value) > 16 {
+ value = value[len(value)-16:]
+ }
+ v, err := strconv.ParseUint(value, 16, 64)
+ if err != nil {
+ return err
+ }
+ p.sigInfo.PendingThread = v
+ case "ShdPnd":
+ if len(value) > 16 {
+ value = value[len(value)-16:]
+ }
+ v, err := strconv.ParseUint(value, 16, 64)
+ if err != nil {
+ return err
+ }
+ p.sigInfo.PendingProcess = v
+ case "SigBlk":
+ if len(value) > 16 {
+ value = value[len(value)-16:]
+ }
+ v, err := strconv.ParseUint(value, 16, 64)
+ if err != nil {
+ return err
+ }
+ p.sigInfo.Blocked = v
+ case "SigIgn":
+ if len(value) > 16 {
+ value = value[len(value)-16:]
+ }
+ v, err := strconv.ParseUint(value, 16, 64)
+ if err != nil {
+ return err
+ }
+ p.sigInfo.Ignored = v
+ case "SigCgt":
+ if len(value) > 16 {
+ value = value[len(value)-16:]
+ }
+ v, err := strconv.ParseUint(value, 16, 64)
+ if err != nil {
+ return err
+ }
+ p.sigInfo.Caught = v
+ }
+
+ }
+ return nil
+}
+
+func (p *Process) fillFromTIDStat(tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) {
+ return p.fillFromTIDStatWithContext(context.Background(), tid)
+}
+
+func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) {
+ pid := p.Pid
+ var statPath string
+
+ if tid == -1 {
+ statPath = common.HostProc(strconv.Itoa(int(pid)), "stat")
+ } else {
+ statPath = common.HostProc(strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat")
+ }
+
+ contents, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ // Indexing from one, as described in `man proc` about the file /proc/[pid]/stat
+ fields := splitProcStat(contents)
+
+ terminal, err := strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+
+ ppid, err := strconv.ParseInt(fields[4], 10, 32)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ utime, err := strconv.ParseFloat(fields[14], 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+
+ stime, err := strconv.ParseFloat(fields[15], 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+
+ // There is no such thing as iotime in stat file. As an approximation, we
+ // will use delayacct_blkio_ticks (aggregated block I/O delays, as per Linux
+ // docs). Note: I am assuming at least Linux 2.6.18
+ var iotime float64
+ if len(fields) > 42 {
+ iotime, err = strconv.ParseFloat(fields[42], 64)
+ if err != nil {
+ iotime = 0 // Ancient linux version, most likely
+ }
+ } else {
+ iotime = 0 // e.g. SmartOS containers
+ }
+
+ cpuTimes := &cpu.TimesStat{
+ CPU: "cpu",
+ User: utime / float64(clockTicks),
+ System: stime / float64(clockTicks),
+ Iowait: iotime / float64(clockTicks),
+ }
+
+ bootTime, _ := common.BootTimeWithContext(ctx)
+ t, err := strconv.ParseUint(fields[22], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ ctime := (t / uint64(clockTicks)) + uint64(bootTime)
+ createTime := int64(ctime * 1000)
+
+ rtpriority, err := strconv.ParseInt(fields[18], 10, 32)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ if rtpriority < 0 {
+ rtpriority = rtpriority*-1 - 1
+ } else {
+ rtpriority = 0
+ }
+
+ // p.Nice = mustParseInt32(fields[18])
+ // use syscall instead of parse Stat file
+ snice, _ := unix.Getpriority(prioProcess, int(pid))
+ nice := int32(snice) // FIXME: is this true?
+
+ minFault, err := strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ cMinFault, err := strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ majFault, err := strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+ cMajFault, err := strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return 0, 0, nil, 0, 0, 0, nil, err
+ }
+
+ faults := &PageFaultsStat{
+ MinorFaults: minFault,
+ MajorFaults: majFault,
+ ChildMinorFaults: cMinFault,
+ ChildMajorFaults: cMajFault,
+ }
+
+ return terminal, int32(ppid), cpuTimes, createTime, uint32(rtpriority), nice, faults, nil
+}
+
+func (p *Process) fillFromStatWithContext(ctx context.Context) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) {
+ return p.fillFromTIDStatWithContext(ctx, -1)
+}
+
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+ return readPidsFromDir(common.HostProc())
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ out := []*Process{}
+
+ pids, err := PidsWithContext(ctx)
+ if err != nil {
+ return out, err
+ }
+
+ for _, pid := range pids {
+ p, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ continue
+ }
+ out = append(out, p)
+ }
+
+ return out, nil
+}
+
+func readPidsFromDir(path string) ([]int32, error) {
+ var ret []int32
+
+ d, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ fnames, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, fname := range fnames {
+ pid, err := strconv.ParseInt(fname, 10, 32)
+ if err != nil {
+ // if not numeric name, just skip
+ continue
+ }
+ ret = append(ret, int32(pid))
+ }
+
+ return ret, nil
+}
+
+func splitProcStat(content []byte) []string {
+ nameStart := bytes.IndexByte(content, '(')
+ nameEnd := bytes.LastIndexByte(content, ')')
+ restFields := strings.Fields(string(content[nameEnd+2:])) // +2 skip ') '
+ name := content[nameStart+1 : nameEnd]
+ pid := strings.TrimSpace(string(content[:nameStart]))
+ fields := make([]string, 3, len(restFields)+3)
+ fields[1] = string(pid)
+ fields[2] = string(name)
+ fields = append(fields, restFields...)
+ return fields
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go
new file mode 100644
index 000000000..cbb1a77f6
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go
@@ -0,0 +1,389 @@
+//go:build openbsd
+// +build openbsd
+
+package process
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ cpu "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ mem "github.com/shirou/gopsutil/v3/mem"
+ net "github.com/shirou/gopsutil/v3/net"
+ "golang.org/x/sys/unix"
+)
+
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+ var ret []int32
+ procs, err := ProcessesWithContext(ctx)
+ if err != nil {
+ return ret, nil
+ }
+
+ for _, p := range procs {
+ ret = append(ret, p.Pid)
+ }
+
+ return ret, nil
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+
+ return k.Ppid, nil
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+ name := common.IntToString(k.Comm[:])
+
+ if len(name) >= 15 {
+ cmdlineSlice, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(cmdlineSlice) > 0 {
+ extendedName := filepath.Base(cmdlineSlice[0])
+ if strings.HasPrefix(extendedName, p.name) {
+ name = extendedName
+ } else {
+ name = cmdlineSlice[0]
+ }
+ }
+ }
+
+ return name, nil
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv}
+ buf, _, err := common.CallSyscall(mib)
+ if err != nil {
+ return nil, err
+ }
+
+ /* From man sysctl(2):
+ The buffer pointed to by oldp is filled with an array of char
+ pointers followed by the strings themselves. The last char
+ pointer is a NULL pointer. */
+ var strParts []string
+ r := bytes.NewReader(buf)
+ baseAddr := uintptr(unsafe.Pointer(&buf[0]))
+ for {
+ argvp, err := readPtr(r)
+ if err != nil {
+ return nil, err
+ }
+ if argvp == 0 { // check for a NULL pointer
+ break
+ }
+ offset := argvp - baseAddr
+ length := uintptr(bytes.IndexByte(buf[offset:], 0))
+ str := string(buf[offset : offset+length])
+ strParts = append(strParts, str)
+ }
+
+ return strParts, nil
+}
+
+// readPtr reads a pointer data from a given reader. WARNING: only little
+// endian architectures are supported.
+func readPtr(r io.Reader) (uintptr, error) {
+ switch sizeofPtr {
+ case 4:
+ var p uint32
+ if err := binary.Read(r, binary.LittleEndian, &p); err != nil {
+ return 0, err
+ }
+ return uintptr(p), nil
+ case 8:
+ var p uint64
+ if err := binary.Read(r, binary.LittleEndian, &p); err != nil {
+ return 0, err
+ }
+ return uintptr(p), nil
+ default:
+ return 0, fmt.Errorf("unsupported pointer size")
+ }
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ argv, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ return strings.Join(argv, " "), nil
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+ return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return []string{""}, err
+ }
+ var s string
+ switch k.Stat {
+ case SIDL:
+ case SRUN:
+ case SONPROC:
+ s = Running
+ case SSLEEP:
+ s = Sleep
+ case SSTOP:
+ s = Stop
+ case SDEAD:
+ s = Zombie
+ }
+
+ return []string{s}, nil
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details
+ pid := p.Pid
+ out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid)))
+ if err != nil {
+ return false, err
+ }
+ return strings.IndexByte(string(out), '+') != -1, nil
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ uids := make([]int32, 0, 3)
+
+ uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid))
+
+ return uids, nil
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ gids := make([]int32, 0, 3)
+ gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid))
+
+ return gids, nil
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+
+ groups := make([]int32, k.Ngroups)
+ for i := int16(0); i < k.Ngroups; i++ {
+ groups[i] = int32(k.Groups[i])
+ }
+
+ return groups, nil
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return "", err
+ }
+
+ ttyNr := uint64(k.Tdev)
+
+ termmap, err := getTerminalMap()
+ if err != nil {
+ return "", err
+ }
+
+ return termmap[ttyNr], nil
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return 0, err
+ }
+ return int32(k.Nice), nil
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ return &IOCountersStat{
+ ReadCount: uint64(k.Uru_inblock),
+ WriteCount: uint64(k.Uru_oublock),
+ }, nil
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ /* not supported, just return 1 */
+ return 1, nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ return &cpu.TimesStat{
+ CPU: "cpu",
+ User: float64(k.Uutime_sec) + float64(k.Uutime_usec)/1000000,
+ System: float64(k.Ustime_sec) + float64(k.Ustime_usec)/1000000,
+ }, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ k, err := p.getKProc()
+ if err != nil {
+ return nil, err
+ }
+ pageSize, err := mem.GetPageSizeWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &MemoryInfoStat{
+ RSS: uint64(k.Vm_rssize) * pageSize,
+ VMS: uint64(k.Vm_tsize) + uint64(k.Vm_dsize) +
+ uint64(k.Vm_ssize),
+ }, nil
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+ pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]*Process, 0, len(pids))
+ for _, pid := range pids {
+ np, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, np)
+ }
+ return ret, nil
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+ return nil, common.ErrNotImplementedError
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ results := []*Process{}
+
+ buf, length, err := callKernProcSyscall(KernProcAll, 0)
+ if err != nil {
+ return results, err
+ }
+
+ // get kinfo_proc size
+ count := int(length / uint64(sizeOfKinfoProc))
+
+ // parse buf to procs
+ for i := 0; i < count; i++ {
+ b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc]
+ k, err := parseKinfoProc(b)
+ if err != nil {
+ continue
+ }
+ p, err := NewProcessWithContext(ctx, int32(k.Pid))
+ if err != nil {
+ continue
+ }
+
+ results = append(results, p)
+ }
+
+ return results, nil
+}
+
+func (p *Process) getKProc() (*KinfoProc, error) {
+ buf, length, err := callKernProcSyscall(KernProcPID, p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ if length != sizeOfKinfoProc {
+ return nil, err
+ }
+
+ k, err := parseKinfoProc(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &k, nil
+}
+
+func callKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) {
+ mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0}
+ mibptr := unsafe.Pointer(&mib[0])
+ miblen := uint64(len(mib))
+ length := uint64(0)
+ _, _, err := unix.Syscall6(
+ unix.SYS___SYSCTL,
+ uintptr(mibptr),
+ uintptr(miblen),
+ 0,
+ uintptr(unsafe.Pointer(&length)),
+ 0,
+ 0)
+ if err != 0 {
+ return nil, length, err
+ }
+
+ count := int32(length / uint64(sizeOfKinfoProc))
+ mib = []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, count}
+ mibptr = unsafe.Pointer(&mib[0])
+ miblen = uint64(len(mib))
+ // get proc info itself
+ buf := make([]byte, length)
+ _, _, err = unix.Syscall6(
+ unix.SYS___SYSCTL,
+ uintptr(mibptr),
+ uintptr(miblen),
+ uintptr(unsafe.Pointer(&buf[0])),
+ uintptr(unsafe.Pointer(&length)),
+ 0,
+ 0)
+ if err != 0 {
+ return buf, length, err
+ }
+
+ return buf, length, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go
new file mode 100644
index 000000000..f4ed02491
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go
@@ -0,0 +1,202 @@
+//go:build openbsd && 386
+// +build openbsd,386
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs process/types_openbsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 66
+ KernProcAll = 0
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 55
+ KernProcArgv = 1
+ KernProcEnv = 3
+)
+
+const (
+ ArgMax = 256 * 1024
+)
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x38
+ sizeOfKinfoProc = 0x264
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SDEAD = 6
+ SONPROC = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type KinfoProc struct {
+ Forw uint64
+ Back uint64
+ Paddr uint64
+ Addr uint64
+ Fd uint64
+ Stats uint64
+ Limit uint64
+ Vmspace uint64
+ Sigacts uint64
+ Sess uint64
+ Tsess uint64
+ Ru uint64
+ Eflag int32
+ Exitsig int32
+ Flag int32
+ Pid int32
+ Ppid int32
+ Sid int32
+ X_pgid int32
+ Tpgid int32
+ Uid uint32
+ Ruid uint32
+ Gid uint32
+ Rgid uint32
+ Groups [16]uint32
+ Ngroups int16
+ Jobc int16
+ Tdev uint32
+ Estcpu uint32
+ Rtime_sec uint32
+ Rtime_usec uint32
+ Cpticks int32
+ Pctcpu uint32
+ Swtime uint32
+ Slptime uint32
+ Schedflags int32
+ Uticks uint64
+ Sticks uint64
+ Iticks uint64
+ Tracep uint64
+ Traceflag int32
+ Holdcnt int32
+ Siglist int32
+ Sigmask uint32
+ Sigignore uint32
+ Sigcatch uint32
+ Stat int8
+ Priority uint8
+ Usrpri uint8
+ Nice uint8
+ Xstat uint16
+ Acflag uint16
+ Comm [24]int8
+ Wmesg [8]int8
+ Wchan uint64
+ Login [32]int8
+ Vm_rssize int32
+ Vm_tsize int32
+ Vm_dsize int32
+ Vm_ssize int32
+ Uvalid int64
+ Ustart_sec uint64
+ Ustart_usec uint32
+ Uutime_sec uint32
+ Uutime_usec uint32
+ Ustime_sec uint32
+ Ustime_usec uint32
+ Uru_maxrss uint64
+ Uru_ixrss uint64
+ Uru_idrss uint64
+ Uru_isrss uint64
+ Uru_minflt uint64
+ Uru_majflt uint64
+ Uru_nswap uint64
+ Uru_inblock uint64
+ Uru_oublock uint64
+ Uru_msgsnd uint64
+ Uru_msgrcv uint64
+ Uru_nsignals uint64
+ Uru_nvcsw uint64
+ Uru_nivcsw uint64
+ Uctime_sec uint32
+ Uctime_usec uint32
+ Psflags int32
+ Spare int32
+ Svuid uint32
+ Svgid uint32
+ Emul [8]int8
+ Rlim_rss_cur uint64
+ Cpuid uint64
+ Vm_map_size uint64
+ Tid int32
+ Rtableid uint32
+}
+
+type Priority struct{}
+
+type KinfoVmentry struct {
+ Start uint32
+ End uint32
+ Guard uint32
+ Fspace uint32
+ Fspace_augment uint32
+ Offset uint64
+ Wired_count int32
+ Etype int32
+ Protection int32
+ Max_protection int32
+ Advice int32
+ Inheritance int32
+ Flags uint8
+ Pad_cgo_0 [3]byte
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go
new file mode 100644
index 000000000..8607422b5
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go
@@ -0,0 +1,200 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_openbsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 66
+ KernProcAll = 0
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 55
+ KernProcArgv = 1
+ KernProcEnv = 3
+)
+
+const (
+ ArgMax = 256 * 1024
+)
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x50
+ sizeOfKinfoProc = 0x268
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SDEAD = 6
+ SONPROC = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type KinfoProc struct {
+ Forw uint64
+ Back uint64
+ Paddr uint64
+ Addr uint64
+ Fd uint64
+ Stats uint64
+ Limit uint64
+ Vmspace uint64
+ Sigacts uint64
+ Sess uint64
+ Tsess uint64
+ Ru uint64
+ Eflag int32
+ Exitsig int32
+ Flag int32
+ Pid int32
+ Ppid int32
+ Sid int32
+ X_pgid int32
+ Tpgid int32
+ Uid uint32
+ Ruid uint32
+ Gid uint32
+ Rgid uint32
+ Groups [16]uint32
+ Ngroups int16
+ Jobc int16
+ Tdev uint32
+ Estcpu uint32
+ Rtime_sec uint32
+ Rtime_usec uint32
+ Cpticks int32
+ Pctcpu uint32
+ Swtime uint32
+ Slptime uint32
+ Schedflags int32
+ Uticks uint64
+ Sticks uint64
+ Iticks uint64
+ Tracep uint64
+ Traceflag int32
+ Holdcnt int32
+ Siglist int32
+ Sigmask uint32
+ Sigignore uint32
+ Sigcatch uint32
+ Stat int8
+ Priority uint8
+ Usrpri uint8
+ Nice uint8
+ Xstat uint16
+ Acflag uint16
+ Comm [24]int8
+ Wmesg [8]int8
+ Wchan uint64
+ Login [32]int8
+ Vm_rssize int32
+ Vm_tsize int32
+ Vm_dsize int32
+ Vm_ssize int32
+ Uvalid int64
+ Ustart_sec uint64
+ Ustart_usec uint32
+ Uutime_sec uint32
+ Uutime_usec uint32
+ Ustime_sec uint32
+ Ustime_usec uint32
+ Pad_cgo_0 [4]byte
+ Uru_maxrss uint64
+ Uru_ixrss uint64
+ Uru_idrss uint64
+ Uru_isrss uint64
+ Uru_minflt uint64
+ Uru_majflt uint64
+ Uru_nswap uint64
+ Uru_inblock uint64
+ Uru_oublock uint64
+ Uru_msgsnd uint64
+ Uru_msgrcv uint64
+ Uru_nsignals uint64
+ Uru_nvcsw uint64
+ Uru_nivcsw uint64
+ Uctime_sec uint32
+ Uctime_usec uint32
+ Psflags int32
+ Spare int32
+ Svuid uint32
+ Svgid uint32
+ Emul [8]int8
+ Rlim_rss_cur uint64
+ Cpuid uint64
+ Vm_map_size uint64
+ Tid int32
+ Rtableid uint32
+}
+
+type Priority struct{}
+
+type KinfoVmentry struct {
+ Start uint64
+ End uint64
+ Guard uint64
+ Fspace uint64
+ Fspace_augment uint64
+ Offset uint64
+ Wired_count int32
+ Etype int32
+ Protection int32
+ Max_protection int32
+ Advice int32
+ Inheritance int32
+ Flags uint8
+ Pad_cgo_0 [7]byte
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go
new file mode 100644
index 000000000..b94429f2e
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go
@@ -0,0 +1,202 @@
+//go:build openbsd && arm
+// +build openbsd,arm
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs process/types_openbsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 66
+ KernProcAll = 0
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 55
+ KernProcArgv = 1
+ KernProcEnv = 3
+)
+
+const (
+ ArgMax = 256 * 1024
+)
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x38
+ sizeOfKinfoProc = 0x264
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SDEAD = 6
+ SONPROC = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type KinfoProc struct {
+ Forw uint64
+ Back uint64
+ Paddr uint64
+ Addr uint64
+ Fd uint64
+ Stats uint64
+ Limit uint64
+ Vmspace uint64
+ Sigacts uint64
+ Sess uint64
+ Tsess uint64
+ Ru uint64
+ Eflag int32
+ Exitsig int32
+ Flag int32
+ Pid int32
+ Ppid int32
+ Sid int32
+ X_pgid int32
+ Tpgid int32
+ Uid uint32
+ Ruid uint32
+ Gid uint32
+ Rgid uint32
+ Groups [16]uint32
+ Ngroups int16
+ Jobc int16
+ Tdev uint32
+ Estcpu uint32
+ Rtime_sec uint32
+ Rtime_usec uint32
+ Cpticks int32
+ Pctcpu uint32
+ Swtime uint32
+ Slptime uint32
+ Schedflags int32
+ Uticks uint64
+ Sticks uint64
+ Iticks uint64
+ Tracep uint64
+ Traceflag int32
+ Holdcnt int32
+ Siglist int32
+ Sigmask uint32
+ Sigignore uint32
+ Sigcatch uint32
+ Stat int8
+ Priority uint8
+ Usrpri uint8
+ Nice uint8
+ Xstat uint16
+ Acflag uint16
+ Comm [24]int8
+ Wmesg [8]int8
+ Wchan uint64
+ Login [32]int8
+ Vm_rssize int32
+ Vm_tsize int32
+ Vm_dsize int32
+ Vm_ssize int32
+ Uvalid int64
+ Ustart_sec uint64
+ Ustart_usec uint32
+ Uutime_sec uint32
+ Uutime_usec uint32
+ Ustime_sec uint32
+ Ustime_usec uint32
+ Uru_maxrss uint64
+ Uru_ixrss uint64
+ Uru_idrss uint64
+ Uru_isrss uint64
+ Uru_minflt uint64
+ Uru_majflt uint64
+ Uru_nswap uint64
+ Uru_inblock uint64
+ Uru_oublock uint64
+ Uru_msgsnd uint64
+ Uru_msgrcv uint64
+ Uru_nsignals uint64
+ Uru_nvcsw uint64
+ Uru_nivcsw uint64
+ Uctime_sec uint32
+ Uctime_usec uint32
+ Psflags int32
+ Spare int32
+ Svuid uint32
+ Svgid uint32
+ Emul [8]int8
+ Rlim_rss_cur uint64
+ Cpuid uint64
+ Vm_map_size uint64
+ Tid int32
+ Rtableid uint32
+}
+
+type Priority struct{}
+
+type KinfoVmentry struct {
+ Start uint32
+ End uint32
+ Guard uint32
+ Fspace uint32
+ Fspace_augment uint32
+ Offset uint64
+ Wired_count int32
+ Etype int32
+ Protection int32
+ Max_protection int32
+ Advice int32
+ Inheritance int32
+ Flags uint8
+ Pad_cgo_0 [3]byte
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go
new file mode 100644
index 000000000..a3291b8ca
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go
@@ -0,0 +1,203 @@
+//go:build openbsd && arm64
+// +build openbsd,arm64
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs process/types_openbsd.go
+
+package process
+
+const (
+ CTLKern = 1
+ KernProc = 66
+ KernProcAll = 0
+ KernProcPID = 1
+ KernProcProc = 8
+ KernProcPathname = 12
+ KernProcArgs = 55
+ KernProcArgv = 1
+ KernProcEnv = 3
+)
+
+const (
+ ArgMax = 256 * 1024
+)
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+)
+
+const (
+ sizeOfKinfoVmentry = 0x50
+ sizeOfKinfoProc = 0x270
+)
+
+const (
+ SIDL = 1
+ SRUN = 2
+ SSLEEP = 3
+ SSTOP = 4
+ SZOMB = 5
+ SDEAD = 6
+ SONPROC = 7
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type KinfoProc struct {
+ Forw uint64
+ Back uint64
+ Paddr uint64
+ Addr uint64
+ Fd uint64
+ Stats uint64
+ Limit uint64
+ Vmspace uint64
+ Sigacts uint64
+ Sess uint64
+ Tsess uint64
+ Ru uint64
+ Eflag int32
+ Exitsig int32
+ Flag int32
+ Pid int32
+ Ppid int32
+ Sid int32
+ X_pgid int32
+ Tpgid int32
+ Uid uint32
+ Ruid uint32
+ Gid uint32
+ Rgid uint32
+ Groups [16]uint32
+ Ngroups int16
+ Jobc int16
+ Tdev uint32
+ Estcpu uint32
+ Rtime_sec uint32
+ Rtime_usec uint32
+ Cpticks int32
+ Pctcpu uint32
+ Swtime uint32
+ Slptime uint32
+ Schedflags int32
+ Uticks uint64
+ Sticks uint64
+ Iticks uint64
+ Tracep uint64
+ Traceflag int32
+ Holdcnt int32
+ Siglist int32
+ Sigmask uint32
+ Sigignore uint32
+ Sigcatch uint32
+ Stat int8
+ Priority uint8
+ Usrpri uint8
+ Nice uint8
+ Xstat uint16
+ Acflag uint16
+ Comm [24]int8
+ Wmesg [8]uint8
+ Wchan uint64
+ Login [32]uint8
+ Vm_rssize int32
+ Vm_tsize int32
+ Vm_dsize int32
+ Vm_ssize int32
+ Uvalid int64
+ Ustart_sec uint64
+ Ustart_usec uint32
+ Uutime_sec uint32
+ Uutime_usec uint32
+ Ustime_sec uint32
+ Ustime_usec uint32
+ Uru_maxrss uint64
+ Uru_ixrss uint64
+ Uru_idrss uint64
+ Uru_isrss uint64
+ Uru_minflt uint64
+ Uru_majflt uint64
+ Uru_nswap uint64
+ Uru_inblock uint64
+ Uru_oublock uint64
+ Uru_msgsnd uint64
+ Uru_msgrcv uint64
+ Uru_nsignals uint64
+ Uru_nvcsw uint64
+ Uru_nivcsw uint64
+ Uctime_sec uint32
+ Uctime_usec uint32
+ Psflags uint32
+ Spare int32
+ Svuid uint32
+ Svgid uint32
+ Emul [8]uint8
+ Rlim_rss_cur uint64
+ Cpuid uint64
+ Vm_map_size uint64
+ Tid int32
+ Rtableid uint32
+ Pledge uint64
+}
+
+type Priority struct{}
+
+type KinfoVmentry struct {
+ Start uint64
+ End uint64
+ Guard uint64
+ Fspace uint64
+ Fspace_augment uint64
+ Offset uint64
+ Wired_count int32
+ Etype int32
+ Protection int32
+ Max_protection int32
+ Advice int32
+ Inheritance int32
+ Flags uint8
+ Pad_cgo_0 [7]byte
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go
new file mode 100644
index 000000000..bc4bc062a
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go
@@ -0,0 +1,203 @@
+//go:build plan9
+// +build plan9
+
+package process
+
+import (
+ "context"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+)
+
+type Signal = syscall.Note
+
+type MemoryMapsStat struct {
+ Path string `json:"path"`
+ Rss uint64 `json:"rss"`
+ Size uint64 `json:"size"`
+ Pss uint64 `json:"pss"`
+ SharedClean uint64 `json:"sharedClean"`
+ SharedDirty uint64 `json:"sharedDirty"`
+ PrivateClean uint64 `json:"privateClean"`
+ PrivateDirty uint64 `json:"privateDirty"`
+ Referenced uint64 `json:"referenced"`
+ Anonymous uint64 `json:"anonymous"`
+ Swap uint64 `json:"swap"`
+}
+
+type MemoryInfoExStat struct{}
+
+// Plan 9 backend: gopsutil implements no process inspection here. Every
+// function below is a stub that returns common.ErrNotImplementedError so
+// callers get a uniform "unsupported on this platform" signal.
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) {
+	return false, common.ErrNotImplementedError
+}
+
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+// StatusWithContext returns a one-element slice (not nil) to match the
+// cross-platform signature even though the value is meaningless here.
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+	return []string{""}, common.ErrNotImplementedError
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+	return false, common.ErrNotImplementedError
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error {
+	return common.ErrNotImplementedError
+}
+
+func (p *Process) SuspendWithContext(ctx context.Context) error {
+	return common.ErrNotImplementedError
+}
+
+func (p *Process) ResumeWithContext(ctx context.Context) error {
+	return common.ErrNotImplementedError
+}
+
+func (p *Process) TerminateWithContext(ctx context.Context) error {
+	return common.ErrNotImplementedError
+}
+
+func (p *Process) KillWithContext(ctx context.Context) error {
+	return common.ErrNotImplementedError
+}
+
+func (p *Process) UsernameWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+	return nil, common.ErrNotImplementedError
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go
new file mode 100644
index 000000000..88e2bff53
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go
@@ -0,0 +1,184 @@
+//go:build linux || freebsd || openbsd || darwin || solaris
+// +build linux freebsd openbsd darwin solaris
+
+package process
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/unix"
+)
+
+// Signal is the platform signal type; on POSIX systems it aliases
+// syscall.Signal so callers can pass unix.SIGTERM etc. directly.
+type Signal = syscall.Signal
+
+// POSIX
+// getTerminalMap builds a map from a terminal device's raw rdev number to
+// its path with the leading "/dev" stripped, by stat()ing candidate tty and
+// pts device files under /dev.
+func getTerminalMap() (map[uint64]string, error) {
+	ret := make(map[uint64]string)
+	var termfiles []string
+
+	d, err := os.Open("/dev")
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	devnames, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	for _, devname := range devnames {
+		// NOTE(review): Readdirnames yields bare names like "tty0", which can
+		// never begin with "/dev/tty", so this branch looks unreachable —
+		// confirm against upstream before relying on tty (non-pts) entries.
+		if strings.HasPrefix(devname, "/dev/tty") {
+			termfiles = append(termfiles, "/dev/tty/"+devname)
+		}
+	}
+
+	var ptsnames []string
+	ptsd, err := os.Open("/dev/pts")
+	if err != nil {
+		// No /dev/pts directory: fall back to BSD-style /dev/ttyp* names.
+		ptsnames, _ = filepath.Glob("/dev/ttyp*")
+		if ptsnames == nil {
+			return nil, err
+		}
+	}
+	// NOTE(review): ptsd may be nil here (open failed but the glob succeeded);
+	// (*os.File).Close on a nil receiver returns ErrInvalid rather than
+	// panicking, so this defer is safe — but it is also duplicated below.
+	defer ptsd.Close()
+
+	if ptsnames == nil {
+		defer ptsd.Close()
+		ptsnames, err = ptsd.Readdirnames(-1)
+		if err != nil {
+			return nil, err
+		}
+		for _, ptsname := range ptsnames {
+			termfiles = append(termfiles, "/dev/pts/"+ptsname)
+		}
+	} else {
+		termfiles = ptsnames
+	}
+
+	// Map each terminal's device number to its /dev-relative path.
+	for _, name := range termfiles {
+		stat := unix.Stat_t{}
+		if err = unix.Stat(name, &stat); err != nil {
+			return nil, err
+		}
+		rdev := uint64(stat.Rdev)
+		ret[rdev] = strings.Replace(name, "/dev", "", -1)
+	}
+	return ret, nil
+}
+
+// isMount is a port of python's os.path.ismount()
+// https://github.com/python/cpython/blob/08ff4369afca84587b1c82034af4e9f64caddbf2/Lib/posixpath.py#L186-L216
+// https://docs.python.org/3/library/os.path.html#os.path.ismount
+// It reports whether path is a mount point: either path and its parent live
+// on different devices, or path and its parent are the same inode (the
+// filesystem-root case). Symlinks are never mount points. Any stat failure
+// is treated as "not a mount".
+func isMount(path string) bool {
+	// Check symlinkness with os.Lstat; unix.DT_LNK is not portable
+	fileInfo, err := os.Lstat(path)
+	if err != nil {
+		return false
+	}
+	if fileInfo.Mode()&os.ModeSymlink != 0 {
+		return false
+	}
+	var stat1 unix.Stat_t
+	if err := unix.Lstat(path, &stat1); err != nil {
+		return false
+	}
+	parent := filepath.Join(path, "..")
+	var stat2 unix.Stat_t
+	if err := unix.Lstat(parent, &stat2); err != nil {
+		return false
+	}
+	// Different device => mount boundary; same inode as parent => root.
+	return stat1.Dev != stat2.Dev || stat1.Ino == stat2.Ino
+}
+
+// PidExistsWithContext reports whether a process with the given pid exists.
+// If procfs is mounted it checks for the /proc/<pid> directory; otherwise it
+// probes the pid with signal 0 and interprets ESRCH (gone) / EPERM (exists
+// but not ours) accordingly.
+func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) {
+	if pid <= 0 {
+		return false, fmt.Errorf("invalid pid %v", pid)
+	}
+	proc, err := os.FindProcess(int(pid))
+	if err != nil {
+		return false, err
+	}
+
+	if isMount(common.HostProc()) { // if //proc exists and is mounted, check if //proc/ folder exists
+		_, err := os.Stat(common.HostProc(strconv.Itoa(int(pid))))
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return err == nil, err
+	}
+
+	// procfs does not exist or is not mounted, check PID existence by signalling the pid
+	err = proc.Signal(syscall.Signal(0))
+	if err == nil {
+		return true, nil
+	}
+	// os.Process reports finished processes with this sentinel string; there
+	// is no exported error value to compare against.
+	if err.Error() == "os: process already finished" {
+		return false, nil
+	}
+	var errno syscall.Errno
+	if !errors.As(err, &errno) {
+		return false, err
+	}
+	switch errno {
+	case syscall.ESRCH:
+		return false, nil
+	case syscall.EPERM:
+		// Signal was refused, so some process owns that pid.
+		return true, nil
+	}
+
+	return false, err
+}
+
+// SendSignalWithContext delivers sig to the process via os.Process.Signal.
+func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error {
+	process, err := os.FindProcess(int(p.Pid))
+	if err != nil {
+		return err
+	}
+
+	err = process.Signal(sig)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// SuspendWithContext stops the process with SIGSTOP (not catchable/ignorable).
+func (p *Process) SuspendWithContext(ctx context.Context) error {
+	return p.SendSignalWithContext(ctx, unix.SIGSTOP)
+}
+
+// ResumeWithContext resumes a stopped process with SIGCONT.
+func (p *Process) ResumeWithContext(ctx context.Context) error {
+	return p.SendSignalWithContext(ctx, unix.SIGCONT)
+}
+
+// TerminateWithContext asks the process to exit with SIGTERM.
+func (p *Process) TerminateWithContext(ctx context.Context) error {
+	return p.SendSignalWithContext(ctx, unix.SIGTERM)
+}
+
+// KillWithContext force-kills the process with SIGKILL.
+func (p *Process) KillWithContext(ctx context.Context) error {
+	return p.SendSignalWithContext(ctx, unix.SIGKILL)
+}
+
+// UsernameWithContext resolves the process's first uid to a user name via
+// the local user database. Returns "" (no error) when no uids are reported.
+func (p *Process) UsernameWithContext(ctx context.Context) (string, error) {
+	uids, err := p.UidsWithContext(ctx)
+	if err != nil {
+		return "", err
+	}
+	if len(uids) > 0 {
+		u, err := user.LookupId(strconv.Itoa(int(uids[0])))
+		if err != nil {
+			return "", err
+		}
+		return u.Username, nil
+	}
+	return "", nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go
new file mode 100644
index 000000000..4f10a67bc
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go
@@ -0,0 +1,304 @@
+package process
+
+import (
+ "bytes"
+ "context"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+)
+
+// MemoryMapsStat carries one /proc memory-map entry; field meanings follow
+// the cross-platform gopsutil convention (sizes presumably in bytes —
+// confirm against upstream docs).
+type MemoryMapsStat struct {
+	Path string `json:"path"`
+	Rss uint64 `json:"rss"`
+	Size uint64 `json:"size"`
+	Pss uint64 `json:"pss"`
+	SharedClean uint64 `json:"sharedClean"`
+	SharedDirty uint64 `json:"sharedDirty"`
+	PrivateClean uint64 `json:"privateClean"`
+	PrivateDirty uint64 `json:"privateDirty"`
+	Referenced uint64 `json:"referenced"`
+	Anonymous uint64 `json:"anonymous"`
+	Swap uint64 `json:"swap"`
+}
+
+// MemoryInfoExStat has no extra fields on this platform.
+type MemoryInfoExStat struct{}
+
+// pidsWithContext lists pids by scanning numeric directory names in /proc.
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+	return readPidsFromDir(common.HostProc())
+}
+
+// ProcessesWithContext returns a Process handle for every currently visible
+// pid. Pids that disappear (or fail to open) between listing and opening are
+// silently skipped rather than failing the whole enumeration.
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+	out := []*Process{}
+
+	pids, err := PidsWithContext(ctx)
+	if err != nil {
+		return out, err
+	}
+
+	for _, pid := range pids {
+		p, err := NewProcessWithContext(ctx, pid)
+		if err != nil {
+			continue
+		}
+		out = append(out, p)
+	}
+
+	return out, nil
+}
+
+// Solaris backend: only the /proc-backed accessors (Exe, Cmdline, Cwd,
+// NumFDs) are implemented; the rest are ErrNotImplementedError stubs.
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+// ExeWithContext prefers the /proc/<pid>/path/a.out symlink and falls back
+// to /proc/<pid>/execname when the symlink does not exist.
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+	exe, err := p.fillFromPathAOutWithContext(ctx)
+	if os.IsNotExist(err) {
+		exe, err = p.fillFromExecnameWithContext(ctx)
+	}
+	return exe, err
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+	return p.fillFromCmdlineWithContext(ctx)
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+	return p.fillSliceFromCmdlineWithContext(ctx)
+}
+
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+	return p.fillFromPathCwdWithContext(ctx)
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+	return []string{""}, common.ErrNotImplementedError
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+	return false, common.ErrNotImplementedError
+}
+
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// NumFDsWithContext counts entries of /proc/<pid>/fd (one name per open fd).
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+	_, fnames, err := p.fillFromfdListWithContext(ctx)
+	return int32(len(fnames)), err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+/**
+** Internal functions
+**/
+
+// fillFromfdListWithContext returns /proc/<pid>/fd, the file names inside
+// it (one per open descriptor), and any error from opening/reading the dir.
+func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) {
+	pid := p.Pid
+	statPath := common.HostProc(strconv.Itoa(int(pid)), "fd")
+	d, err := os.Open(statPath)
+	if err != nil {
+		return statPath, []string{}, err
+	}
+	defer d.Close()
+	fnames, err := d.Readdirnames(-1)
+	return statPath, fnames, err
+}
+
+// fillFromPathCwdWithContext resolves the /proc/<pid>/path/cwd symlink.
+func (p *Process) fillFromPathCwdWithContext(ctx context.Context) (string, error) {
+	pid := p.Pid
+	cwdPath := common.HostProc(strconv.Itoa(int(pid)), "path", "cwd")
+	cwd, err := os.Readlink(cwdPath)
+	if err != nil {
+		return "", err
+	}
+	return cwd, nil
+}
+
+// fillFromPathAOutWithContext resolves the /proc/<pid>/path/a.out symlink
+// (the executable path on Solaris).
+func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, error) {
+	pid := p.Pid
+	cwdPath := common.HostProc(strconv.Itoa(int(pid)), "path", "a.out")
+	exe, err := os.Readlink(cwdPath)
+	if err != nil {
+		return "", err
+	}
+	return exe, nil
+}
+
+// fillFromExecnameWithContext reads /proc/<pid>/execname as a fallback
+// executable name.
+func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, error) {
+	pid := p.Pid
+	execNamePath := common.HostProc(strconv.Itoa(int(pid)), "execname")
+	exe, err := ioutil.ReadFile(execNamePath)
+	if err != nil {
+		return "", err
+	}
+	return string(exe), nil
+}
+
+// fillFromCmdlineWithContext reads /proc/<pid>/cmdline (NUL-separated args)
+// and joins the arguments with single spaces.
+func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) {
+	pid := p.Pid
+	cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
+	cmdline, err := ioutil.ReadFile(cmdPath)
+	if err != nil {
+		return "", err
+	}
+	// Split on NUL bytes; FieldsFunc also drops empty fields.
+	ret := strings.FieldsFunc(string(cmdline), func(r rune) bool {
+		if r == '\u0000' {
+			return true
+		}
+		return false
+	})
+
+	return strings.Join(ret, " "), nil
+}
+
+// fillSliceFromCmdlineWithContext returns the argv of the process as a
+// slice, splitting /proc/<pid>/cmdline on NUL and dropping the trailing NUL.
+func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) {
+	pid := p.Pid
+	cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
+	cmdline, err := ioutil.ReadFile(cmdPath)
+	if err != nil {
+		return nil, err
+	}
+	if len(cmdline) == 0 {
+		return nil, nil
+	}
+	if cmdline[len(cmdline)-1] == 0 {
+		cmdline = cmdline[:len(cmdline)-1]
+	}
+	parts := bytes.Split(cmdline, []byte{0})
+	var strParts []string
+	for _, p := range parts {
+		strParts = append(strParts, string(p))
+	}
+
+	return strParts, nil
+}
+
+// readPidsFromDir lists a /proc-style directory and returns every entry
+// whose name parses as a decimal pid; non-numeric names are skipped.
+func readPidsFromDir(path string) ([]int32, error) {
+	var ret []int32
+
+	d, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	fnames, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	for _, fname := range fnames {
+		pid, err := strconv.ParseInt(fname, 10, 32)
+		if err != nil {
+			// if not numeric name, just skip
+			continue
+		}
+		ret = append(ret, int32(pid))
+	}
+
+	return ret, nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go
new file mode 100644
index 000000000..18f4f9455
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go
@@ -0,0 +1,1169 @@
+//go:build windows
+// +build windows
+
+package process
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "syscall"
+ "time"
+ "unicode/utf16"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "github.com/shirou/gopsutil/v3/net"
+ "golang.org/x/sys/windows"
+)
+
+// Signal aliases syscall.Signal for API compatibility with POSIX builds.
+type Signal = syscall.Signal
+
+// Lazily-loaded Win32 entry points; resolved on first Call.
+var (
+	modntdll = windows.NewLazySystemDLL("ntdll.dll")
+	procNtResumeProcess = modntdll.NewProc("NtResumeProcess")
+	procNtSuspendProcess = modntdll.NewProc("NtSuspendProcess")
+
+	modpsapi = windows.NewLazySystemDLL("psapi.dll")
+	procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
+	procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW")
+
+	advapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW")
+	procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges")
+
+	procQueryFullProcessImageNameW = common.Modkernel32.NewProc("QueryFullProcessImageNameW")
+	procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass")
+	procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters")
+	procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo")
+
+	// Set once by init() from GetNativeSystemInfo.
+	processorArchitecture uint
+)
+
+// Minimal access right needed for the query-style calls below.
+const processQueryInformation = windows.PROCESS_QUERY_LIMITED_INFORMATION
+
+type systemProcessorInformation struct {
+	ProcessorArchitecture uint16
+	ProcessorLevel uint16
+	ProcessorRevision uint16
+	Reserved uint16
+	ProcessorFeatureBits uint16
+}
+
+// systemInfo mirrors the Win32 SYSTEM_INFO structure layout.
+type systemInfo struct {
+	wProcessorArchitecture uint16
+	wReserved uint16
+	dwpageSize uint32
+	lpMinimumApplicationAddress uintptr
+	lpMaximumApplicationAddress uintptr
+	dwActiveProcessorMask uintptr
+	dwNumberOfProcessors uint32
+	dwProcessorType uint32
+	dwAllocationGranularity uint32
+	wProcessorLevel uint16
+	wProcessorRevision uint16
+}
+
+// Memory_info_ex is different between OSes
+type MemoryInfoExStat struct{}
+
+type MemoryMapsStat struct{}
+
+// ioCounters is an equivalent representation of IO_COUNTERS in the Windows API.
+// https://docs.microsoft.com/windows/win32/api/winnt/ns-winnt-io_counters
+type ioCounters struct {
+	ReadOperationCount uint64
+	WriteOperationCount uint64
+	OtherOperationCount uint64
+	ReadTransferCount uint64
+	WriteTransferCount uint64
+	OtherTransferCount uint64
+}
+
+// processBasicInformation32/64 mirror PROCESS_BASIC_INFORMATION for 32-
+// and 64-bit targets respectively; only PebBaseAddress is consumed here.
+type processBasicInformation32 struct {
+	Reserved1 uint32
+	PebBaseAddress uint32
+	Reserved2 uint32
+	Reserved3 uint32
+	UniqueProcessId uint32
+	Reserved4 uint32
+}
+
+type processBasicInformation64 struct {
+	Reserved1 uint64
+	PebBaseAddress uint64
+	Reserved2 uint64
+	Reserved3 uint64
+	UniqueProcessId uint64
+	Reserved4 uint64
+}
+
+// processEnvironmentBlock32/64 are truncated PEB layouts; only the
+// ProcessParameters pointer is used.
+type processEnvironmentBlock32 struct {
+	Reserved1 [2]uint8
+	BeingDebugged uint8
+	Reserved2 uint8
+	Reserved3 [2]uint32
+	Ldr uint32
+	ProcessParameters uint32
+	// More fields which we don't use so far
+}
+
+type processEnvironmentBlock64 struct {
+	Reserved1 [2]uint8
+	BeingDebugged uint8
+	Reserved2 uint8
+	_ [4]uint8 // padding, since we are 64 bit, the next pointer is 64 bit aligned (when compiling for 32 bit, this is not the case without manual padding)
+	Reserved3 [2]uint64
+	Ldr uint64
+	ProcessParameters uint64
+	// More fields which we don't use so far
+}
+
+// rtlUserProcessParameters32/64 are truncated RTL_USER_PROCESS_PARAMETERS
+// layouts carrying the cwd, image path and command-line UNICODE_STRINGs
+// (length + address pairs, with max-length fields as padding).
+type rtlUserProcessParameters32 struct {
+	Reserved1 [16]uint8
+	ConsoleHandle uint32
+	ConsoleFlags uint32
+	StdInputHandle uint32
+	StdOutputHandle uint32
+	StdErrorHandle uint32
+	CurrentDirectoryPathNameLength uint16
+	_ uint16 // Max Length
+	CurrentDirectoryPathAddress uint32
+	CurrentDirectoryHandle uint32
+	DllPathNameLength uint16
+	_ uint16 // Max Length
+	DllPathAddress uint32
+	ImagePathNameLength uint16
+	_ uint16 // Max Length
+	ImagePathAddress uint32
+	CommandLineLength uint16
+	_ uint16 // Max Length
+	CommandLineAddress uint32
+	EnvironmentAddress uint32
+	// More fields which we don't use so far
+}
+
+type rtlUserProcessParameters64 struct {
+	Reserved1 [16]uint8
+	ConsoleHandle uint64
+	ConsoleFlags uint64
+	StdInputHandle uint64
+	StdOutputHandle uint64
+	StdErrorHandle uint64
+	CurrentDirectoryPathNameLength uint16
+	_ uint16 // Max Length
+	_ uint32 // Padding
+	CurrentDirectoryPathAddress uint64
+	CurrentDirectoryHandle uint64
+	DllPathNameLength uint16
+	_ uint16 // Max Length
+	_ uint32 // Padding
+	DllPathAddress uint64
+	ImagePathNameLength uint16
+	_ uint16 // Max Length
+	_ uint32 // Padding
+	ImagePathAddress uint64
+	CommandLineLength uint16
+	_ uint16 // Max Length
+	_ uint32 // Padding
+	CommandLineAddress uint64
+	EnvironmentAddress uint64
+	// More fields which we don't use so far
+}
+
+type winLUID struct {
+	LowPart winDWord
+	HighPart winLong
+}
+
+// LUID_AND_ATTRIBUTES
+type winLUIDAndAttributes struct {
+	Luid winLUID
+	Attributes winDWord
+}
+
+// TOKEN_PRIVILEGES
+type winTokenPrivileges struct {
+	PrivilegeCount winDWord
+	Privileges [1]winLUIDAndAttributes
+}
+
+type (
+	winLong int32
+	winDWord uint32
+)
+
+// init caches the native processor architecture and best-effort enables
+// SeDebugPrivilege so other-user processes can be inspected. All failures
+// are ignored: the package still works with reduced visibility.
+func init() {
+	var systemInfo systemInfo
+
+	procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo)))
+	processorArchitecture = uint(systemInfo.wProcessorArchitecture)
+
+	// enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119
+	handle, err := syscall.GetCurrentProcess()
+	if err != nil {
+		return
+	}
+
+	var token syscall.Token
+	// 0x0028 = TOKEN_ADJUST_PRIVILEGES (0x20) | TOKEN_QUERY (0x08).
+	err = syscall.OpenProcessToken(handle, 0x0028, &token)
+	if err != nil {
+		return
+	}
+	defer token.Close()
+
+	tokenPrivileges := winTokenPrivileges{PrivilegeCount: 1}
+	lpName := syscall.StringToUTF16("SeDebugPrivilege")
+	ret, _, _ := procLookupPrivilegeValue.Call(
+		0,
+		uintptr(unsafe.Pointer(&lpName[0])),
+		uintptr(unsafe.Pointer(&tokenPrivileges.Privileges[0].Luid)))
+	if ret == 0 {
+		return
+	}
+
+	tokenPrivileges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED
+
+	procAdjustTokenPrivileges.Call(
+		uintptr(token),
+		0,
+		uintptr(unsafe.Pointer(&tokenPrivileges)),
+		uintptr(unsafe.Sizeof(tokenPrivileges)),
+		0,
+		0)
+}
+
+// pidsWithContext enumerates all pids via EnumProcesses, growing the DWORD
+// buffer by 1024 entries until the API returns fewer bytes than the buffer
+// holds (a completely-filled buffer means the list may be truncated).
+func pidsWithContext(ctx context.Context) ([]int32, error) {
+	// inspired by https://gist.github.com/henkman/3083408
+	// and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329
+	var ret []int32
+	var read uint32 = 0
+	var psSize uint32 = 1024
+	const dwordSize uint32 = 4
+
+	for {
+		ps := make([]uint32, psSize)
+		if err := windows.EnumProcesses(ps, &read); err != nil {
+			return nil, err
+		}
+		if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one
+			psSize += 1024
+			continue
+		}
+		// read is a byte count; convert to a DWORD (pid) count.
+		for _, pid := range ps[:read/dwordSize] {
+			ret = append(ret, int32(pid))
+		}
+		return ret, nil
+
+	}
+}
+
+// PidExistsWithContext reports whether pid exists. Real Windows pids are
+// multiples of 4; other values are checked against the full pid list since
+// OpenProcess would falsely succeed on them (see linked blog post). Valid
+// candidates are probed with OpenProcess + a zero-timeout wait: a TIMEOUT
+// result means the process is still running.
+func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) {
+	if pid == 0 { // special case for pid 0 System Idle Process
+		return true, nil
+	}
+	if pid < 0 {
+		return false, fmt.Errorf("invalid pid %v", pid)
+	}
+	if pid%4 != 0 {
+		// OpenProcess will succeed even on non-existing pid here https://devblogs.microsoft.com/oldnewthing/20080606-00/?p=22043
+		// so we list every pid just to be sure and be future-proof
+		pids, err := PidsWithContext(ctx)
+		if err != nil {
+			return false, err
+		}
+		for _, i := range pids {
+			if i == pid {
+				return true, err
+			}
+		}
+		return false, err
+	}
+	h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid))
+	if err == windows.ERROR_ACCESS_DENIED {
+		// Process exists but we may not touch it.
+		return true, nil
+	}
+	if err == windows.ERROR_INVALID_PARAMETER {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	defer windows.CloseHandle(h)
+	event, err := windows.WaitForSingleObject(h, 0)
+	return event == uint32(windows.WAIT_TIMEOUT), err
+}
+
+// PpidWithContext returns the parent pid, consulting the per-Process cache
+// first and populating it from a toolhelp process snapshot on a miss.
+func (p *Process) PpidWithContext(ctx context.Context) (int32, error) {
+	// if cached already, return from cache
+	cachedPpid := p.getPpid()
+	if cachedPpid != 0 {
+		return cachedPpid, nil
+	}
+
+	ppid, _, _, err := getFromSnapProcess(p.Pid)
+	if err != nil {
+		return 0, err
+	}
+
+	// no errors and not cached already, so cache it
+	p.setPpid(ppid)
+
+	return ppid, nil
+}
+
+// NameWithContext returns the executable name from a process snapshot and
+// opportunistically caches the parent pid discovered along the way.
+func (p *Process) NameWithContext(ctx context.Context) (string, error) {
+	ppid, _, name, err := getFromSnapProcess(p.Pid)
+	if err != nil {
+		return "", fmt.Errorf("could not get Name: %s", err)
+	}
+
+	// if no errors and not cached already, cache ppid
+	p.parent = ppid
+	if 0 == p.getPpid() {
+		p.setPpid(ppid)
+	}
+
+	return name, nil
+}
+
+// TgidWithContext is not supported on Windows.
+func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+// ExeWithContext returns the full image path of the process, using
+// QueryFullProcessImageNameW where available (Vista+) and falling back to
+// GetProcessImageFileNameW (device path, converted to a DOS path) on XP.
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+	c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid))
+	if err != nil {
+		return "", err
+	}
+	defer windows.CloseHandle(c)
+	buf := make([]uint16, syscall.MAX_LONG_PATH)
+	size := uint32(syscall.MAX_LONG_PATH)
+	if err := procQueryFullProcessImageNameW.Find(); err == nil { // Vista+
+		ret, _, err := procQueryFullProcessImageNameW.Call(
+			uintptr(c),
+			uintptr(0),
+			uintptr(unsafe.Pointer(&buf[0])),
+			uintptr(unsafe.Pointer(&size)))
+		if ret == 0 {
+			return "", err
+		}
+		return windows.UTF16ToString(buf[:]), nil
+	}
+	// XP fallback
+	ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size))
+	if ret == 0 {
+		return "", err
+	}
+	return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil
+}
+
+// CmdlineWithContext returns the raw command line read out of the target's
+// PEB (see getProcessCommandLine).
+func (p *Process) CmdlineWithContext(_ context.Context) (string, error) {
+	cmdline, err := getProcessCommandLine(p.Pid)
+	if err != nil {
+		return "", fmt.Errorf("could not get CommandLine: %s", err)
+	}
+	return cmdline, nil
+}
+
+// CmdlineSliceWithContext splits the command line on single spaces.
+// NOTE(review): this naive split breaks quoted arguments containing spaces;
+// confirm whether callers depend on exact argv semantics.
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+	cmdline, err := p.CmdlineWithContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return strings.Split(cmdline, " "), nil
+}
+
+// createTimeWithContext returns the process creation time in milliseconds
+// since the Unix epoch, derived from the kernel-reported CreationTime.
+func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
+	ru, err := getRusage(p.Pid)
+	if err != nil {
+		return 0, fmt.Errorf("could not get CreationDate: %s", err)
+	}
+
+	return ru.CreationTime.Nanoseconds() / 1000000, nil
+}
+
+// CwdWithContext returns the target process's current working directory by
+// reading the CurrentDirectory UNICODE_STRING out of its
+// RTL_USER_PROCESS_PARAMETERS block. Access-denied / invalid-parameter opens
+// yield ("", nil) rather than an error; a process with no readable cwd also
+// yields ("", nil).
+func (p *Process) CwdWithContext(_ context.Context) (string, error) {
+	h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid))
+	if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER {
+		return "", nil
+	}
+	if err != nil {
+		return "", err
+	}
+	defer syscall.CloseHandle(syscall.Handle(h))
+
+	procIs32Bits := is32BitProcess(h)
+
+	if procIs32Bits {
+		userProcParams, err := getUserProcessParams32(h)
+		if err != nil {
+			return "", err
+		}
+		if userProcParams.CurrentDirectoryPathNameLength > 0 {
+			cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength))
+			// BUG FIX: verify the read against the path *length*, not the path
+			// *address* — the old check compared len(cwd) to
+			// CurrentDirectoryPathAddress, making every successful 32-bit read
+			// look truncated (the 64-bit branch below was already correct).
+			if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) {
+				return "", errors.New("cannot read current working directory")
+			}
+
+			return convertUTF16ToString(cwd), nil
+		}
+	} else {
+		userProcParams, err := getUserProcessParams64(h)
+		if err != nil {
+			return "", err
+		}
+		if userProcParams.CurrentDirectoryPathNameLength > 0 {
+			cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CurrentDirectoryPathAddress, uint(userProcParams.CurrentDirectoryPathNameLength))
+			if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) {
+				return "", errors.New("cannot read current working directory")
+			}
+
+			return convertUTF16ToString(cwd), nil
+		}
+	}
+
+	// if we reach here, we have no cwd
+	return "", nil
+}
+
+func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
+ return []string{""}, common.ErrNotImplementedError
+}
+
+func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
+ return false, common.ErrNotImplementedError
+}
+
+// UsernameWithContext returns the owner of the process in "DOMAIN\user"
+// form, resolved from the user SID of the process's access token.
+func (p *Process) UsernameWithContext(ctx context.Context) (string, error) {
+	pid := p.Pid
+	c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid))
+	if err != nil {
+		return "", err
+	}
+	defer windows.CloseHandle(c)
+
+	// A TOKEN_QUERY token is enough to read the token's user SID.
+	var token syscall.Token
+	err = syscall.OpenProcessToken(syscall.Handle(c), syscall.TOKEN_QUERY, &token)
+	if err != nil {
+		return "", err
+	}
+	defer token.Close()
+	tokenUser, err := token.GetTokenUser()
+	if err != nil {
+		return "", err
+	}
+
+	// An empty system name resolves the SID on the local machine.
+	user, domain, _, err := tokenUser.User.Sid.LookupAccount("")
+	return domain + "\\" + user, err
+}
+
+// UidsWithContext is not implemented on Windows (no Unix uids).
+func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// GidsWithContext is not implemented on Windows (no Unix gids).
+func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// GroupsWithContext is not implemented on Windows.
+func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// TerminalWithContext is not implemented on Windows (no controlling tty).
+func (p *Process) TerminalWithContext(ctx context.Context) (string, error) {
+	return "", common.ErrNotImplementedError
+}
+
+// priorityClasses maps a win32 priority class to its WMI equivalent Win32_Process.Priority
+// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass
+// https://docs.microsoft.com/en-us/windows/desktop/cimwin32prov/win32-process
+// Used by NiceWithContext to translate GetPriorityClass results.
+var priorityClasses = map[int]int32{
+	0x00008000: 10, // ABOVE_NORMAL_PRIORITY_CLASS
+	0x00004000: 6,  // BELOW_NORMAL_PRIORITY_CLASS
+	0x00000080: 13, // HIGH_PRIORITY_CLASS
+	0x00000040: 4,  // IDLE_PRIORITY_CLASS
+	0x00000020: 8,  // NORMAL_PRIORITY_CLASS
+	0x00000100: 24, // REALTIME_PRIORITY_CLASS
+}
+
+// NiceWithContext returns the process priority mapped onto the WMI
+// Win32_Process.Priority scale via priorityClasses. Despite the name, this
+// is a Windows priority class, not a Unix nice value.
+func (p *Process) NiceWithContext(ctx context.Context) (int32, error) {
+	c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid))
+	if err != nil {
+		return 0, err
+	}
+	defer windows.CloseHandle(c)
+	// GetPriorityClass returns 0 on failure, with the cause in err.
+	ret, _, err := procGetPriorityClass.Call(uintptr(c))
+	if ret == 0 {
+		return 0, err
+	}
+	priority, ok := priorityClasses[int(ret)]
+	if !ok {
+		return 0, fmt.Errorf("unknown priority class %v", ret)
+	}
+	return priority, nil
+}
+
+// IOniceWithContext is not implemented on Windows.
+func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+// RlimitWithContext is not implemented on Windows (no rlimits).
+func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// RlimitUsageWithContext is not implemented on Windows (no rlimits).
+func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// IOCountersWithContext returns read/write operation and byte counters for
+// the process, obtained from GetProcessIoCounters.
+func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
+	c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid))
+	if err != nil {
+		return nil, err
+	}
+	defer windows.CloseHandle(c)
+	var ioCounters ioCounters
+	// GetProcessIoCounters returns 0 on failure, with the cause in err.
+	ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&ioCounters)))
+	if ret == 0 {
+		return nil, err
+	}
+	stats := &IOCountersStat{
+		ReadCount:  ioCounters.ReadOperationCount,
+		ReadBytes:  ioCounters.ReadTransferCount,
+		WriteCount: ioCounters.WriteOperationCount,
+		WriteBytes: ioCounters.WriteTransferCount,
+	}
+
+	return stats, nil
+}
+
+// NumCtxSwitchesWithContext is not implemented on Windows.
+func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// NumFDsWithContext is not implemented on Windows (no Unix fd table).
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
+	return 0, common.ErrNotImplementedError
+}
+
+// NumThreadsWithContext returns the thread count of the process, taken from
+// a Toolhelp32 snapshot. As a side effect it caches the parent pid.
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+	ppid, ret, _, err := getFromSnapProcess(p.Pid)
+	if err != nil {
+		return 0, err
+	}
+
+	// Cache ppid if not cached already. The previous code assigned p.parent
+	// directly — racing with the mutex-guarded accessors and making this
+	// check dead — so go through getPpid/setPpid instead.
+	if p.getPpid() == 0 {
+		p.setPpid(ppid)
+	}
+
+	return ret, nil
+}
+
+// ThreadsWithContext is not implemented on Windows (per-thread CPU times
+// are not collected by this package).
+func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// TimesWithContext reports the user-mode and kernel-mode CPU time consumed
+// by the process, expressed in seconds.
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+	sysTimes, err := getProcessCPUTimes(p.Pid)
+	if err != nil {
+		return nil, err
+	}
+
+	// GetProcessTimes yields FILETIMEs: 64-bit counts of 100-nanosecond
+	// intervals since 1601-01-01 UTC, split into 32-bit high/low halves
+	// (http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx).
+	// One high-word unit is 2^32 * 100ns = 429.4967296s; one low-word unit
+	// is 100ns = 1e-7s. Conversion borrowed from psutil/CPython.
+	toSeconds := func(ft syscall.Filetime) float64 {
+		return float64(ft.HighDateTime)*429.4967296 + float64(ft.LowDateTime)*1e-7
+	}
+
+	return &cpu.TimesStat{
+		User:   toSeconds(sysTimes.UserTime),
+		System: toSeconds(sysTimes.KernelTime),
+	}, nil
+}
+
+// CPUAffinityWithContext is not implemented on Windows.
+func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// MemoryInfoWithContext returns the resident (working set) and virtual
+// (pagefile-backed) memory usage of the process.
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+	counters, err := getMemoryInfo(p.Pid)
+	if err != nil {
+		return nil, err
+	}
+
+	return &MemoryInfoStat{
+		RSS: uint64(counters.WorkingSetSize),
+		VMS: uint64(counters.PagefileUsage),
+	}, nil
+}
+
+// MemoryInfoExWithContext is not implemented on Windows.
+func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// PageFaultsWithContext is not implemented on Windows.
+func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// ChildrenWithContext returns the direct child processes of p, found by
+// walking a Toolhelp32 snapshot of all processes.
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+	children := []*Process{}
+	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0))
+	if err != nil {
+		return children, err
+	}
+	defer windows.CloseHandle(snap)
+
+	var entry windows.ProcessEntry32
+	entry.Size = uint32(unsafe.Sizeof(entry))
+	if err := windows.Process32First(snap, &entry); err != nil {
+		return children, err
+	}
+	for {
+		if entry.ParentProcessID == uint32(p.Pid) {
+			// Skip children that vanish before we can attach to them.
+			if child, cerr := NewProcessWithContext(ctx, int32(entry.ProcessID)); cerr == nil {
+				children = append(children, child)
+			}
+		}
+		if err := windows.Process32Next(snap, &entry); err != nil {
+			return children, nil
+		}
+	}
+}
+
+// OpenFilesWithContext enumerates the disk files the process has open by
+// walking the system handle table, duplicating each handle owned by p into
+// this process and resolving its final path. Handles whose path query hangs
+// (known to happen for some device handles) are abandoned after 100ms.
+func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) {
+	files := make([]OpenFilesStat, 0)
+	fileExists := make(map[string]bool)
+
+	process, err := windows.OpenProcess(common.ProcessQueryInformation, false, uint32(p.Pid))
+	if err != nil {
+		return nil, err
+	}
+	// BUG FIX: this handle was previously never closed (leaked per call).
+	defer windows.CloseHandle(process)
+
+	buffer := make([]byte, 1024)
+	var size uint32
+
+	st := common.CallWithExpandingBuffer(
+		func() common.NtStatus {
+			return common.NtQuerySystemInformation(
+				common.SystemExtendedHandleInformationClass,
+				&buffer[0],
+				uint32(len(buffer)),
+				&size,
+			)
+		},
+		&buffer,
+		&size,
+	)
+	if st.IsError() {
+		return nil, st.Error()
+	}
+
+	// Reinterpret the raw buffer as the variable-length handle table.
+	handlesList := (*common.SystemExtendedHandleInformation)(unsafe.Pointer(&buffer[0]))
+	handles := make([]common.SystemExtendedHandleTableEntryInformation, int(handlesList.NumberOfHandles))
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&handles))
+	hdr.Data = uintptr(unsafe.Pointer(&handlesList.Handles[0]))
+
+	currentProcess, err := windows.GetCurrentProcess()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, handle := range handles {
+		var file uintptr
+		if int32(handle.UniqueProcessId) != p.Pid {
+			continue
+		}
+		if windows.DuplicateHandle(process, windows.Handle(handle.HandleValue), currentProcess, (*windows.Handle)(&file),
+			0, true, windows.DUPLICATE_SAME_ACCESS) != nil {
+			continue
+		}
+		// release the duplicated handle when this function returns
+		defer windows.CloseHandle(windows.Handle(file))
+
+		fileType, err := windows.GetFileType(windows.Handle(file))
+		if err != nil || fileType != windows.FILE_TYPE_DISK {
+			continue
+		}
+
+		var fileName string
+		// BUG FIX: buffered so the worker goroutine can always complete its
+		// send and exit even after we time out and stop listening.
+		ch := make(chan struct{}, 1)
+
+		go func() {
+			var buf [syscall.MAX_LONG_PATH]uint16
+			n, err := windows.GetFinalPathNameByHandle(windows.Handle(file), &buf[0], syscall.MAX_LONG_PATH, 0)
+			if err != nil {
+				return
+			}
+
+			fileName = string(utf16.Decode(buf[:n]))
+			ch <- struct{}{}
+		}()
+
+		select {
+		case <-time.NewTimer(100 * time.Millisecond).C:
+			// GetFinalPathNameByHandle hung; skip this handle.
+			continue
+		case <-ch:
+			fileInfo, err := os.Stat(fileName)
+			if err != nil || fileInfo.IsDir() {
+				continue
+			}
+
+			if _, exists := fileExists[fileName]; !exists {
+				files = append(files, OpenFilesStat{
+					Path: fileName,
+					Fd:   uint64(file),
+				})
+				fileExists[fileName] = true
+			}
+		case <-ctx.Done():
+			return files, ctx.Err()
+		}
+	}
+
+	return files, nil
+}
+
+// ConnectionsWithContext returns all network connections owned by the
+// process, delegating to the net subpackage.
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+	return net.ConnectionsPidWithContext(ctx, "all", p.Pid)
+}
+
+// ConnectionsMaxWithContext is not implemented on Windows.
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// MemoryMapsWithContext is not implemented on Windows.
+func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+// SendSignalWithContext is not implemented on Windows (no Unix signals).
+func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error {
+	return common.ErrNotImplementedError
+}
+
+// SuspendWithContext suspends every thread of the process via the
+// undocumented NtSuspendProcess API.
+func (p *Process) SuspendWithContext(ctx context.Context) error {
+	h, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid))
+	if err != nil {
+		return err
+	}
+	defer windows.CloseHandle(h)
+
+	// A non-zero NTSTATUS means failure; surface the raw code.
+	// See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55
+	if status, _, _ := procNtSuspendProcess.Call(uintptr(h)); status != 0 {
+		return fmt.Errorf("NtStatus='0x%.8X'", status)
+	}
+	return nil
+}
+
+// ResumeWithContext resumes a previously suspended process via the
+// undocumented NtResumeProcess API.
+func (p *Process) ResumeWithContext(ctx context.Context) error {
+	h, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid))
+	if err != nil {
+		return err
+	}
+	defer windows.CloseHandle(h)
+
+	// A non-zero NTSTATUS means failure; surface the raw code.
+	// See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55
+	if status, _, _ := procNtResumeProcess.Call(uintptr(h)); status != 0 {
+		return fmt.Errorf("NtStatus='0x%.8X'", status)
+	}
+	return nil
+}
+
+// TerminateWithContext forcibly ends the process with exit code 0.
+func (p *Process) TerminateWithContext(ctx context.Context) error {
+	h, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid))
+	if err != nil {
+		return err
+	}
+	defer windows.CloseHandle(h)
+	return windows.TerminateProcess(h, 0)
+}
+
+// KillWithContext kills the process using the standard library's
+// os.Process.Kill (TerminateProcess under the hood on Windows).
+func (p *Process) KillWithContext(ctx context.Context) error {
+	proc, err := os.FindProcess(int(p.Pid))
+	if err != nil {
+		return err
+	}
+	return proc.Kill()
+}
+
+// EnvironWithContext returns the environment variables of the process as
+// "KEY=value" strings, read from the target process memory.
+func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
+	envVars, err := getProcessEnvironmentVariables(p.Pid, ctx)
+	if err != nil {
+		// Wrap with %w so callers can still errors.Is/As the cause
+		// (the previous %s verb discarded the error chain).
+		return nil, fmt.Errorf("could not get environment variables: %w", err)
+	}
+	return envVars, nil
+}
+
+// getPpid retrieves the cached parent pid in a thread-safe manner.
+func (p *Process) getPpid() int32 {
+	p.parentMutex.RLock()
+	defer p.parentMutex.RUnlock()
+	return p.parent
+}
+
+// setPpid caches the parent pid in a thread-safe manner (WINDOWS ONLY).
+// see https://psutil.readthedocs.io/en/latest/#psutil.Process.ppid
+func (p *Process) setPpid(ppid int32) {
+	p.parentMutex.Lock()
+	defer p.parentMutex.Unlock()
+	p.parent = ppid
+}
+
+// getFromSnapProcess looks pid up in a Toolhelp32 process snapshot and
+// returns its parent pid, thread count and executable name.
+func getFromSnapProcess(pid int32) (int32, int32, string, error) {
+	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid))
+	if err != nil {
+		return 0, 0, "", err
+	}
+	defer windows.CloseHandle(snap)
+
+	var entry windows.ProcessEntry32
+	entry.Size = uint32(unsafe.Sizeof(entry))
+	if err = windows.Process32First(snap, &entry); err != nil {
+		return 0, 0, "", err
+	}
+	// Walk the snapshot until the requested pid shows up.
+	for {
+		if entry.ProcessID == uint32(pid) {
+			exe := windows.UTF16ToString(entry.ExeFile[:])
+			return int32(entry.ParentProcessID), int32(entry.Threads), exe, nil
+		}
+		if err = windows.Process32Next(snap, &entry); err != nil {
+			return 0, 0, "", fmt.Errorf("couldn't find pid: %d", pid)
+		}
+	}
+}
+
+// ProcessesWithContext returns a Process handle for every pid currently
+// alive. Pids that disappear between enumeration and construction are
+// silently skipped.
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+	procs := []*Process{}
+
+	pids, err := PidsWithContext(ctx)
+	if err != nil {
+		return procs, fmt.Errorf("could not get Processes %s", err)
+	}
+
+	for _, pid := range pids {
+		proc, perr := NewProcessWithContext(ctx, pid)
+		if perr != nil {
+			continue
+		}
+		procs = append(procs, proc)
+	}
+
+	return procs, nil
+}
+
+// getRusage fetches creation/exit/kernel/user times for pid via
+// GetProcessTimes.
+func getRusage(pid int32) (*windows.Rusage, error) {
+	h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid))
+	if err != nil {
+		return nil, err
+	}
+	defer windows.CloseHandle(h)
+
+	var ru windows.Rusage
+	if err := windows.GetProcessTimes(h, &ru.CreationTime, &ru.ExitTime, &ru.KernelTime, &ru.UserTime); err != nil {
+		return nil, err
+	}
+
+	return &ru, nil
+}
+
+// getMemoryInfo returns the PROCESS_MEMORY_COUNTERS block for pid.
+func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) {
+	var counters PROCESS_MEMORY_COUNTERS
+
+	h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid))
+	if err != nil {
+		return counters, err
+	}
+	defer windows.CloseHandle(h)
+
+	err = getProcessMemoryInfo(h, &counters)
+	return counters, err
+}
+
+// getProcessMemoryInfo fills mem with psapi GetProcessMemoryInfo data for
+// the process behind h. A zero return from the syscall means failure; the
+// errno is used when set, otherwise EINVAL is reported.
+func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+// SYSTEM_TIMES mirrors the four FILETIME out-parameters of the
+// GetProcessTimes API.
+type SYSTEM_TIMES struct {
+	CreateTime syscall.Filetime
+	ExitTime   syscall.Filetime
+	KernelTime syscall.Filetime
+	UserTime   syscall.Filetime
+}
+
+// getProcessCPUTimes returns raw FILETIME CPU times for pid from
+// GetProcessTimes; TimesWithContext converts them to seconds.
+func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) {
+	var times SYSTEM_TIMES
+
+	h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid))
+	if err != nil {
+		return times, err
+	}
+	defer windows.CloseHandle(h)
+
+	err = syscall.GetProcessTimes(
+		syscall.Handle(h),
+		&times.CreateTime,
+		&times.ExitTime,
+		&times.KernelTime,
+		&times.UserTime,
+	)
+
+	return times, err
+}
+
+// getUserProcessParams32 reads the RTL_USER_PROCESS_PARAMETERS of a 32-bit
+// target: it locates the PEB, reads it out of the target's memory, then
+// follows its ProcessParameters pointer. Short reads are treated as errors.
+func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, error) {
+	pebAddress, err := queryPebAddress(syscall.Handle(handle), true)
+	if err != nil {
+		return rtlUserProcessParameters32{}, fmt.Errorf("cannot locate process PEB: %w", err)
+	}
+
+	buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock32{})))
+	if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) {
+		return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB")
+	}
+	peb := (*processEnvironmentBlock32)(unsafe.Pointer(&buf[0]))
+	userProcessAddress := uint64(peb.ProcessParameters)
+	buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{})))
+	if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) {
+		return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters")
+	}
+	return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil
+}
+
+// getUserProcessParams64 is the 64-bit counterpart of
+// getUserProcessParams32: locate the target's PEB, read it, then read the
+// RTL_USER_PROCESS_PARAMETERS block it points at.
+func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, error) {
+	pebAddress, err := queryPebAddress(syscall.Handle(handle), false)
+	if err != nil {
+		return rtlUserProcessParameters64{}, fmt.Errorf("cannot locate process PEB: %w", err)
+	}
+
+	buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{})))
+	if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) {
+		return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB")
+	}
+	peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0]))
+	userProcessAddress := peb.ProcessParameters
+	buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{})))
+	if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) {
+		return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters")
+	}
+	return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil
+}
+
+// is32BitProcess reports whether the process behind h is 32-bit. On 32-bit
+// hosts every process is 32-bit; on 64-bit hosts the ProcessWow64Information
+// query decides; otherwise we fall back to this binary's own bitness.
+func is32BitProcess(h windows.Handle) bool {
+	const (
+		PROCESSOR_ARCHITECTURE_INTEL = 0
+		PROCESSOR_ARCHITECTURE_ARM   = 5
+		PROCESSOR_ARCHITECTURE_ARM64 = 12
+		PROCESSOR_ARCHITECTURE_IA64  = 6
+		PROCESSOR_ARCHITECTURE_AMD64 = 9
+	)
+
+	switch processorArchitecture {
+	case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM:
+		// 32-bit host: every process is 32-bit.
+		return true
+	case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64:
+		// 64-bit host: ask whether the target runs under WOW64.
+		var wow64 uint
+		ret, _, _ := common.ProcNtQueryInformationProcess.Call(
+			uintptr(h),
+			uintptr(common.ProcessWow64Information),
+			uintptr(unsafe.Pointer(&wow64)),
+			uintptr(unsafe.Sizeof(wow64)),
+			uintptr(0),
+		)
+		if int(ret) >= 0 {
+			return wow64 != 0
+		}
+		// The OS does not support the call; fall back to the bitness of
+		// this binary.
+		return unsafe.Sizeof(wow64) == 4
+	default:
+		// Unknown platform: rely on this binary's bitness.
+		return unsafe.Sizeof(processorArchitecture) != 8
+	}
+}
+
+// getProcessEnvironmentVariables reads the environment block of pid from the
+// target process memory and splits it into "KEY=value" strings. The block is
+// a sequence of NUL-terminated UTF-16 strings ending with an empty string.
+// Access-denied / invalid-parameter errors yield (nil, nil).
+func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) {
+	h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid))
+	if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.CloseHandle(syscall.Handle(h))
+
+	procIs32Bits := is32BitProcess(h)
+
+	var processParameterBlockAddress uint64
+
+	if procIs32Bits {
+		peb, err := getUserProcessParams32(h)
+		if err != nil {
+			return nil, err
+		}
+		processParameterBlockAddress = uint64(peb.EnvironmentAddress)
+	} else {
+		peb, err := getUserProcessParams64(h)
+		if err != nil {
+			return nil, err
+		}
+		processParameterBlockAddress = peb.EnvironmentAddress
+	}
+	envvarScanner := bufio.NewScanner(&processReader{
+		processHandle:  h,
+		is32BitProcess: procIs32Bits,
+		offset:         processParameterBlockAddress,
+	})
+	// Tokenize on UTF-16 NUL (two zero bytes at an even offset).
+	envvarScanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		if atEOF && len(data) == 0 {
+			return 0, nil, nil
+		}
+		// Check for UTF-16 zero character
+		for i := 0; i < len(data)-1; i += 2 {
+			if data[i] == 0 && data[i+1] == 0 {
+				return i + 2, data[0:i], nil
+			}
+		}
+		if atEOF {
+			return len(data), data, nil
+		}
+		// Request more data
+		return 0, nil, nil
+	})
+	var envVars []string
+	for envvarScanner.Scan() {
+		entry := envvarScanner.Bytes()
+		if len(entry) == 0 {
+			break // Block is finished
+		}
+		envVars = append(envVars, convertUTF16ToString(entry))
+		// BUG FIX: the previous `break` inside a select only exited the
+		// select statement, so cancellation was silently ignored; honor the
+		// context by returning early.
+		select {
+		case <-ctx.Done():
+			return envVars, ctx.Err()
+		default:
+		}
+	}
+	if err := envvarScanner.Err(); err != nil {
+		return nil, err
+	}
+	return envVars, nil
+}
+
+// processReader adapts readProcessMemory to io.Reader so that another
+// process's memory can be consumed with bufio.Scanner, starting at offset.
+type processReader struct {
+	processHandle  windows.Handle
+	is32BitProcess bool
+	offset         uint64
+}
+
+// Read copies bytes from the target process memory at the current offset
+// into buf and advances the offset; it reports io.EOF once nothing more can
+// be read.
+func (p *processReader) Read(buf []byte) (int, error) {
+	chunk := readProcessMemory(syscall.Handle(p.processHandle), p.is32BitProcess, p.offset, uint(len(buf)))
+	n := len(chunk)
+	if n == 0 {
+		return 0, io.EOF
+	}
+	copy(buf, chunk)
+	p.offset += uint64(n)
+	return n, nil
+}
+
+// getProcessCommandLine reads the command line of pid from the
+// RTL_USER_PROCESS_PARAMETERS block in the target process memory.
+// Access-denied / invalid-parameter errors yield ("", nil).
+func getProcessCommandLine(pid int32) (string, error) {
+	h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid))
+	if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER {
+		return "", nil
+	}
+	if err != nil {
+		return "", err
+	}
+	defer syscall.CloseHandle(syscall.Handle(h))
+
+	procIs32Bits := is32BitProcess(h)
+
+	if procIs32Bits {
+		userProcParams, err := getUserProcessParams32(h)
+		if err != nil {
+			return "", err
+		}
+		if userProcParams.CommandLineLength > 0 {
+			// A short read means the target's memory changed under us.
+			cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CommandLineAddress), uint(userProcParams.CommandLineLength))
+			if len(cmdLine) != int(userProcParams.CommandLineLength) {
+				return "", errors.New("cannot read cmdline")
+			}
+
+			return convertUTF16ToString(cmdLine), nil
+		}
+	} else {
+		userProcParams, err := getUserProcessParams64(h)
+		if err != nil {
+			return "", err
+		}
+		if userProcParams.CommandLineLength > 0 {
+			cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CommandLineAddress, uint(userProcParams.CommandLineLength))
+			if len(cmdLine) != int(userProcParams.CommandLineLength) {
+				return "", errors.New("cannot read cmdline")
+			}
+
+			return convertUTF16ToString(cmdLine), nil
+		}
+	}
+
+	// if we reach here, we have no command line
+	return "", nil
+}
+
+// convertUTF16ToString interprets src as little-endian UTF-16 bytes and
+// converts them to a Go string, stopping at a NUL terminator if present.
+// A trailing odd byte is ignored.
+func convertUTF16ToString(src []byte) string {
+	codePoints := make([]uint16, 0, len(src)/2)
+	for i := 0; i+1 < len(src); i += 2 {
+		codePoints = append(codePoints, uint16(src[i])|uint16(src[i+1])<<8)
+	}
+	return syscall.UTF16ToString(codePoints)
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go
new file mode 100644
index 000000000..982287d93
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go
@@ -0,0 +1,109 @@
+//go:build (windows && 386) || (windows && arm)
+// +build windows,386 windows,arm
+
+package process
+
+import (
+ "errors"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+)
+
+// PROCESS_MEMORY_COUNTERS mirrors the psapi structure of the same name for
+// 32-bit builds (all counters are 32-bit SIZE_T values).
+type PROCESS_MEMORY_COUNTERS struct {
+	CB                         uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uint32
+	WorkingSetSize             uint32
+	QuotaPeakPagedPoolUsage    uint32
+	QuotaPagedPoolUsage        uint32
+	QuotaPeakNonPagedPoolUsage uint32
+	QuotaNonPagedPoolUsage     uint32
+	PagefileUsage              uint32
+	PeakPagefileUsage          uint32
+}
+
+// queryPebAddress returns the PEB base address of the target process. This
+// build is a 32-bit binary: a 32-bit peer is queried with the regular
+// NtQueryInformationProcess, while a 64-bit target needs the WOW64 helper.
+func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, error) {
+	if is32BitProcess {
+		// we are on a 32-bit process reading an external 32-bit process
+		var info processBasicInformation32
+
+		ret, _, _ := common.ProcNtQueryInformationProcess.Call(
+			uintptr(procHandle),
+			uintptr(common.ProcessBasicInformation),
+			uintptr(unsafe.Pointer(&info)),
+			uintptr(unsafe.Sizeof(info)),
+			uintptr(0),
+		)
+		if status := windows.NTStatus(ret); status != windows.STATUS_SUCCESS {
+			return 0, status
+		}
+		return uint64(info.PebBaseAddress), nil
+	}
+
+	// We are on a 32-bit process reading an external 64-bit process; probe
+	// for the NtWow64 API first to avoid a panic where it is missing.
+	if common.ProcNtWow64QueryInformationProcess64.Find() != nil {
+		return 0, errors.New("can't find API to query 64 bit process from 32 bit")
+	}
+	var info processBasicInformation64
+
+	ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call(
+		uintptr(procHandle),
+		uintptr(common.ProcessBasicInformation),
+		uintptr(unsafe.Pointer(&info)),
+		uintptr(unsafe.Sizeof(info)),
+		uintptr(0),
+	)
+	if status := windows.NTStatus(ret); status != windows.STATUS_SUCCESS {
+		return 0, status
+	}
+	return info.PebBaseAddress, nil
+}
+
+// readProcessMemory reads size bytes at address from the target process and
+// returns the bytes actually read (nil on any failure). This build is a
+// 32-bit binary; reading a 64-bit target requires the WOW64 read API.
+func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, size uint) []byte {
+	if is32BitProcess {
+		var read uint
+
+		buffer := make([]byte, size)
+
+		ret, _, _ := common.ProcNtReadVirtualMemory.Call(
+			uintptr(h),
+			uintptr(address),
+			uintptr(unsafe.Pointer(&buffer[0])),
+			uintptr(size),
+			uintptr(unsafe.Pointer(&read)),
+		)
+		if int(ret) >= 0 && read > 0 {
+			return buffer[:read]
+		}
+	} else {
+		// reading a 64-bit process from a 32-bit one
+		if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic
+			var read uint64
+
+			buffer := make([]byte, size)
+
+			// The 64-bit address and size are each passed as two 32-bit
+			// words (low dword, then high dword).
+			ret, _, _ := common.ProcNtWow64ReadVirtualMemory64.Call(
+				uintptr(h),
+				uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value
+				uintptr(address>>32),
+				uintptr(unsafe.Pointer(&buffer[0])),
+				uintptr(size), // the call expects a 64-bit value
+				uintptr(0),    // but size is 32-bit so pass zero as the high dword
+				uintptr(unsafe.Pointer(&read)),
+			)
+			if int(ret) >= 0 && read > 0 {
+				return buffer[:uint(read)]
+			}
+		}
+	}
+
+	// if we reach here, an error happened
+	return nil
+}
diff --git a/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go
new file mode 100644
index 000000000..74c6212cf
--- /dev/null
+++ b/test/integration/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go
@@ -0,0 +1,79 @@
+//go:build (windows && amd64) || (windows && arm64)
+// +build windows,amd64 windows,arm64
+
+package process
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v3/internal/common"
+ "golang.org/x/sys/windows"
+)
+
+// PROCESS_MEMORY_COUNTERS mirrors the psapi structure of the same name for
+// 64-bit builds (SIZE_T counters widen to 64 bits).
+type PROCESS_MEMORY_COUNTERS struct {
+	CB                         uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uint64
+	WorkingSetSize             uint64
+	QuotaPeakPagedPoolUsage    uint64
+	QuotaPagedPoolUsage        uint64
+	QuotaPeakNonPagedPoolUsage uint64
+	QuotaNonPagedPoolUsage     uint64
+	PagefileUsage              uint64
+	PeakPagefileUsage          uint64
+}
+
+// queryPebAddress returns the PEB base address of the target process. This
+// build is a 64-bit binary: a 32-bit (WOW64) target exposes its PEB32 via
+// ProcessWow64Information, a 64-bit target via ProcessBasicInformation.
+func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, error) {
+	if is32BitProcess {
+		// we are on a 64-bit process reading an external 32-bit process
+		var wow64 uint
+
+		ret, _, _ := common.ProcNtQueryInformationProcess.Call(
+			uintptr(procHandle),
+			uintptr(common.ProcessWow64Information),
+			uintptr(unsafe.Pointer(&wow64)),
+			uintptr(unsafe.Sizeof(wow64)),
+			uintptr(0),
+		)
+		if status := windows.NTStatus(ret); status != windows.STATUS_SUCCESS {
+			return 0, status
+		}
+		return uint64(wow64), nil
+	}
+
+	// we are on a 64-bit process reading an external 64-bit process
+	var info processBasicInformation64
+
+	ret, _, _ := common.ProcNtQueryInformationProcess.Call(
+		uintptr(procHandle),
+		uintptr(common.ProcessBasicInformation),
+		uintptr(unsafe.Pointer(&info)),
+		uintptr(unsafe.Sizeof(info)),
+		uintptr(0),
+	)
+	if status := windows.NTStatus(ret); status != windows.STATUS_SUCCESS {
+		return 0, status
+	}
+	return info.PebBaseAddress, nil
+}
+
+// readProcessMemory reads size bytes at address from the target process via
+// NtReadVirtualMemory and returns the bytes actually read (nil on failure).
+// The is32BitProcess flag is irrelevant for a 64-bit reader.
+func readProcessMemory(procHandle syscall.Handle, _ bool, address uint64, size uint) []byte {
+	buffer := make([]byte, size)
+	var read uint
+
+	ret, _, _ := common.ProcNtReadVirtualMemory.Call(
+		uintptr(procHandle),
+		uintptr(address),
+		uintptr(unsafe.Pointer(&buffer[0])),
+		uintptr(size),
+		uintptr(unsafe.Pointer(&read)),
+	)
+	if int(ret) < 0 || read == 0 {
+		return nil
+	}
+	return buffer[:read]
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/.gitignore b/test/integration/vendor/github.com/spf13/afero/.gitignore
new file mode 100644
index 000000000..9c1d98611
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/.gitignore
@@ -0,0 +1,2 @@
+sftpfs/file1
+sftpfs/test/
diff --git a/test/integration/vendor/github.com/spf13/afero/LICENSE.txt b/test/integration/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/test/integration/vendor/github.com/spf13/afero/README.md b/test/integration/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 000000000..3bafbfdfc
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,442 @@
+
+
+A FileSystem Abstraction System for Go
+
+[](https://github.com/spf13/afero/actions/workflows/test.yml) [](https://godoc.org/github.com/spf13/afero) [](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+interacting with any filesystem, as an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+ $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application before had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chown(name string, uid, gid int) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+They are available under two different approaches to use. You can either call
+them directly where the first parameter of each function will be the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs,"", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and can be easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs() during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that Tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+ appFS := afero.NewMemMapFs()
+ // create test files and directories
+ appFS.MkdirAll("src/a", 0755)
+ afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+ afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+ name := "src/c"
+ _, err := appFS.Stat(name)
+ if os.IsNotExist(err) {
+ t.Errorf("file \"%s\" does not exist.\n", name)
+ }
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and to speed up unnecessary disk io when persistence isn’t
+necessary. It is fully concurrent and will work within go routines
+safely.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for secure file transfer protocol (sftp). Which can
+be used to perform file operations over an encrypted channel.
+
+### GCSFs
+
+Afero has experimental support for Google Cloud Storage (GCS). You can either set the
+`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
+`NewGcsFS` to configure access to your GCS bucket.
+
+Some known limitations of the existing implementation:
+* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
+* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version.
+* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
+
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The given file name to the operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names, any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The Http package requires a slightly specific version of Open which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs()
+fileserver := http.FileServer(httpFs.Dir())
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly permitting the request is within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Cache files in the layer for the given time.Duration, a cache duration of 0
+means "forever" meaning the file will not be re-requested from the base ever.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+ base := afero.NewOsFs()
+ roBase := afero.NewReadOnlyFs(base)
+ ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+ fh, _ = ufs.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+See the [Releases Page](https://github.com/spf13/afero/releases).
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/test/integration/vendor/github.com/spf13/afero/afero.go b/test/integration/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 000000000..199480cd0
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,111 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+
+package afero
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+type Afero struct {
+ Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ io.Writer
+ io.WriterAt
+
+ Name() string
+ Readdir(count int) ([]os.FileInfo, error)
+ Readdirnames(n int) ([]string, error)
+ Stat() (os.FileInfo, error)
+ Sync() error
+ Truncate(size int64) error
+ WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+ // Create creates a file in the filesystem, returning the file and an
+ // error, if any happens.
+ Create(name string) (File, error)
+
+ // Mkdir creates a directory in the filesystem, return an error if any
+ // happens.
+ Mkdir(name string, perm os.FileMode) error
+
+ // MkdirAll creates a directory path and all parents that does not exist
+ // yet.
+ MkdirAll(path string, perm os.FileMode) error
+
+ // Open opens a file, returning it or an error, if any happens.
+ Open(name string) (File, error)
+
+ // OpenFile opens a file using the given flags and the given mode.
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+ // RemoveAll removes a directory path and any children it contains. It
+ // does not fail if the path does not exist (return nil).
+ RemoveAll(path string) error
+
+ // Rename renames a file.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file, or an error, if any
+ // happens.
+ Stat(name string) (os.FileInfo, error)
+
+ // The name of this FileSystem
+ Name() string
+
+ // Chmod changes the mode of the named file to mode.
+ Chmod(name string, mode os.FileMode) error
+
+ // Chown changes the uid and gid of the named file.
+ Chown(name string, uid, gid int) error
+
+ //Chtimes changes the access and modification times of the named file
+ Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("out of range")
+ ErrTooLarge = errors.New("too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/test/integration/vendor/github.com/spf13/afero/appveyor.yml b/test/integration/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 000000000..65e20e8ca
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,10 @@
+# This currently does nothing. We have moved to GitHub action, but this is kept
+# until spf13 has disabled this project in AppVeyor.
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+ GOPATH: C:\gopath
+build_script:
+- cmd: >-
+ go version
+
diff --git a/test/integration/vendor/github.com/spf13/afero/basepath.go b/test/integration/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 000000000..70a1d9168
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,223 @@
+package afero
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var (
+ _ Lstater = (*BasePathFs)(nil)
+ _ fs.ReadDirFile = (*BasePathFile)(nil)
+)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The given file name to the operations on this Fs will be prepended with
+// the base path before calling the base Fs.
+// Any file name (after filepath.Clean()) outside this base path will be
+// treated as non existing file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+ source Fs
+ path string
+}
+
+type BasePathFile struct {
+ File
+ path string
+}
+
+func (f *BasePathFile) Name() string {
+ sourcename := f.File.Name()
+ return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ if rdf, ok := f.File.(fs.ReadDirFile); ok {
+ return rdf.ReadDir(n)
+
+ }
+ return readDirFile{f.File}.ReadDir(n)
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+ return &BasePathFs{source: source, path: path}
+}
+
+// on a file outside the base path it returns the given file name and an error,
+// else the given file with the base path prepended
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+ if err := validateBasePathName(name); err != nil {
+ return name, err
+ }
+
+ bpath := filepath.Clean(b.path)
+ path = filepath.Clean(filepath.Join(bpath, name))
+ if !strings.HasPrefix(path, bpath) {
+ return name, os.ErrNotExist
+ }
+
+ return path, nil
+}
+
+func validateBasePathName(name string) error {
+ if runtime.GOOS != "windows" {
+ // Not much to do here;
+ // the virtual file paths all look absolute on *nix.
+ return nil
+ }
+
+ // On Windows a common mistake would be to provide an absolute OS path
+ // We could strip out the base part, but that would not be very portable.
+ if filepath.IsAbs(name) {
+ return os.ErrNotExist
+ }
+
+ return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chtimes", Path: name, Err: err}
+ }
+ return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chmod", Path: name, Err: err}
+ }
+ return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Chown(name string, uid, gid int) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chown", Path: name, Err: err}
+ }
+ return b.source.Chown(name, uid, gid)
+}
+
+func (b *BasePathFs) Name() string {
+ return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+ }
+ return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+ if oldname, err = b.RealPath(oldname); err != nil {
+ return &os.PathError{Op: "rename", Path: oldname, Err: err}
+ }
+ if newname, err = b.RealPath(newname); err != nil {
+ return &os.PathError{Op: "rename", Path: newname, Err: err}
+ }
+ return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove_all", Path: name, Err: err}
+ }
+ return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+ }
+ sourcef, err := b.source.OpenFile(name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{sourcef, b.path}, nil
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "open", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "create", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
+ }
+ if lstater, ok := b.source.(Lstater); ok {
+ return lstater.LstatIfPossible(name)
+ }
+ fi, err := b.source.Stat(name)
+ return fi, false, err
+}
+
+func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
+ oldname, err := b.RealPath(oldname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ newname, err = b.RealPath(newname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ if linker, ok := b.source.(Linker); ok {
+ return linker.SymlinkIfPossible(oldname, newname)
+ }
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return "", &os.PathError{Op: "readlink", Path: name, Err: err}
+ }
+ if reader, ok := b.source.(LinkReader); ok {
+ return reader.ReadlinkIfPossible(name)
+ }
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/cacheOnReadFs.go b/test/integration/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 000000000..017d344fd
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,315 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union will forward all write calls also to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter - Note: this will also make the overlay read-only, for writing files
+// in the overlay, use the overlay Fs directly, not via the union Fs.
+type CacheOnReadFs struct {
+ base Fs
+ layer Fs
+ cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+ return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+ // not present in the overlay, unknown if it exists in the base:
+ cacheMiss cacheState = iota
+ // present in the overlay and in base, base file is newer:
+ cacheStale
+ // present in the overlay - with cache time == 0 it may exist in the base,
+ // with cacheTime > 0 it exists in the base and is same age or newer in the
+ // overlay
+ cacheHit
+ // happens if someone writes directly to the overlay without
+ // going through this union
+ cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+ var lfi, bfi os.FileInfo
+ lfi, err = u.layer.Stat(name)
+ if err == nil {
+ if u.cacheTime == 0 {
+ return cacheHit, lfi, nil
+ }
+ if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+ bfi, err = u.base.Stat(name)
+ if err != nil {
+ return cacheLocal, lfi, nil
+ }
+ if bfi.ModTime().After(lfi.ModTime()) {
+ return cacheStale, bfi, nil
+ }
+ }
+ return cacheHit, lfi, nil
+ }
+
+ if err == syscall.ENOENT || os.IsNotExist(err) {
+ return cacheMiss, nil, nil
+ }
+
+ return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error {
+ return copyFileToLayer(u.base, u.layer, name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chtimes(name, atime, mtime)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chtimes(name, atime, mtime)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chmod(name, mode)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chmod(name, mode)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Chown(name string, uid, gid int) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chown(name, uid, gid)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chown(name, uid, gid)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chown(name, uid, gid)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheMiss:
+ return u.base.Stat(name)
+ default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+ return fi, nil
+ }
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+ st, _, err := u.cacheStatus(oldname)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Rename(oldname, newname)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(oldname); err != nil {
+ return err
+ }
+ err = u.base.Rename(oldname, newname)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.Remove(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.RemoveAll(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheLocal, cacheHit:
+ default:
+ if err := u.copyFileToLayer(name, flag, perm); err != nil {
+ return nil, err
+ }
+ }
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ bfi, err := u.base.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ lfi, err := u.layer.OpenFile(name, flag, perm)
+ if err != nil {
+ bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+ return nil, err
+ }
+ return &UnionFile{Base: bfi, Layer: lfi}, nil
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch st {
+ case cacheLocal:
+ return u.layer.Open(name)
+
+ case cacheMiss:
+ bfi, err := u.base.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if bfi.IsDir() {
+ return u.base.Open(name)
+ }
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+
+ case cacheStale:
+ if !fi.IsDir() {
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+ }
+ case cacheHit:
+ if !fi.IsDir() {
+ return u.layer.Open(name)
+ }
+ }
+ // the dirs from cacheHit, cacheStale fall down here:
+ bfile, _ := u.base.Open(name)
+ lfile, err := u.layer.Open(name)
+ if err != nil && bfile == nil {
+ return nil, err
+ }
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+ err := u.base.Mkdir(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+ return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+ err := u.base.MkdirAll(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+ bfh, err := u.base.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ lfh, err := u.layer.Create(name)
+ if err != nil {
+ // oops, see comment about OS_TRUNC above, should we remove? then we have to
+ // remember if the file did not exist before
+ bfh.Close()
+ return nil, err
+ }
+ return &UnionFile{Base: bfh, Layer: lfh}, nil
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/const_bsds.go b/test/integration/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 000000000..eed0f225f
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,23 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly
+// +build aix darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/test/integration/vendor/github.com/spf13/afero/const_win_unix.go b/test/integration/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 000000000..004d57e2f
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix
+// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/test/integration/vendor/github.com/spf13/afero/copyOnWriteFs.go b/test/integration/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 000000000..6ff8f3099
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,326 @@
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*CopyOnWriteFs)(nil)
+
+// The CopyOnWriteFs is a union filesystem: a read only base file system with
+// a possibly writeable layer on top. Changes to the file system will only
+// be made in the overlay: Changing an existing file in the base layer which
+// is not present in the overlay will copy the file to the overlay ("changing"
+// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+ base Fs
+ layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+ return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// Returns true if the file is not in the overlay
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+ if _, err := u.layer.Stat(name); err == nil {
+ return false, nil
+ }
+ _, err := u.base.Stat(name)
+ if err != nil {
+ if oerr, ok := err.(*os.PathError); ok {
+ if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+ return false, nil
+ }
+ }
+ if err == syscall.ENOENT {
+ return false, nil
+ }
+ }
+ return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chown(name, uid, gid)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+ fi, err := u.layer.Stat(name)
+ if err != nil {
+ isNotExist := u.isNotExist(err)
+ if isNotExist {
+ return u.base.Stat(name)
+ }
+ return nil, err
+ }
+ return fi, nil
+}
+
+func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ llayer, ok1 := u.layer.(Lstater)
+ lbase, ok2 := u.base.(Lstater)
+
+ if ok1 {
+ fi, b, err := llayer.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ if ok2 {
+ fi, b, err := lbase.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ fi, err := u.Stat(name)
+
+ return fi, false, err
+}
+
+func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
+ if slayer, ok := u.layer.(Linker); ok {
+ return slayer.SymlinkIfPossible(oldname, newname)
+ }
+
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
+ if rlayer, ok := u.layer.(LinkReader); ok {
+ return rlayer.ReadlinkIfPossible(name)
+ }
+
+ if rbase, ok := u.base.(LinkReader); ok {
+ return rbase.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
+func (u *CopyOnWriteFs) isNotExist(err error) bool {
+ if e, ok := err.(*os.PathError); ok {
+ err = e.Err
+ }
+ if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+ return true
+ }
+ return false
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+ b, err := u.isBaseFile(oldname)
+ if err != nil {
+ return err
+ }
+ if b {
+ return syscall.EPERM
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+ err := u.layer.Remove(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+ err := u.layer.RemoveAll(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ if b {
+ if err = u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ dir := filepath.Dir(name)
+ isaDir, err := IsDir(u.base, dir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if isaDir {
+ if err = u.layer.MkdirAll(dir, 0777); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ isaDir, err = IsDir(u.layer, dir)
+ if err != nil {
+ return nil, err
+ }
+ if isaDir {
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+ }
+ if b {
+ return u.base.OpenFile(name, flag, perm)
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+// This function handles the 9 different possibilities caused
+// by the union which are the intersection of the following...
+// layer: doesn't exist, exists as a file, and exists as a directory
+// base: doesn't exist, exists as a file, and exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+ // Since the overlay overrides the base we check that first
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If overlay doesn't exist, return the base (base state irrelevant)
+ if b {
+ return u.base.Open(name)
+ }
+
+ // If overlay is a file, return it (base state irrelevant)
+ dir, err := IsDir(u.layer, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ return u.layer.Open(name)
+ }
+
+ // Overlay is a directory, base state now matters.
+ // Base state has 3 states to check but 2 outcomes:
+ // A. It's a file or non-readable in the base (return just the overlay)
+ // B. It's an accessible directory in the base (return a UnionFile)
+
+ // If base is file or nonreadable, return overlay
+ dir, err = IsDir(u.base, name)
+ if !dir || err != nil {
+ return u.layer.Open(name)
+ }
+
+ // Both base & layer are directories
+ // Return union file (if opens are without error)
+ bfile, bErr := u.base.Open(name)
+ lfile, lErr := u.layer.Open(name)
+
+ // If either have errors at this point something is very wrong. Return nil and the errors
+ if bErr != nil || lErr != nil {
+ return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+ }
+
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return ErrFileExists
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+ return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ // This is in line with how os.MkdirAll behaves.
+ return nil
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+ return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/httpFs.go b/test/integration/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 000000000..ac0de6d51
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,114 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type httpDir struct {
+ basePath string
+ fs HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+ if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) ||
+ strings.Contains(name, "\x00") {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d.basePath)
+ if dir == "" {
+ dir = "."
+ }
+
+ f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type HttpFs struct {
+ source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+ return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+ return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "h HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+ return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+ return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chown(name string, uid, gid int) error {
+ return h.source.Chown(name, uid, gid)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+ return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+ return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+ f, err := h.source.Open(name)
+ if err == nil {
+ if httpfile, ok := f.(http.File); ok {
+ return httpfile, nil
+ }
+ }
+ return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+ return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+ return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+ return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+ return h.source.Stat(name)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/internal/common/adapters.go b/test/integration/vendor/github.com/spf13/afero/internal/common/adapters.go
new file mode 100644
index 000000000..60685caa5
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/internal/common/adapters.go
@@ -0,0 +1,27 @@
+// Copyright © 2022 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import "io/fs"
+
+// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry
+type FileInfoDirEntry struct {
+ fs.FileInfo
+}
+
+var _ fs.DirEntry = FileInfoDirEntry{}
+
+func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
+
+func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }
diff --git a/test/integration/vendor/github.com/spf13/afero/iofs.go b/test/integration/vendor/github.com/spf13/afero/iofs.go
new file mode 100644
index 000000000..938b9316e
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/iofs.go
@@ -0,0 +1,298 @@
+//go:build go1.16
+// +build go1.16
+
+package afero
+
+import (
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "sort"
+ "time"
+
+ "github.com/spf13/afero/internal/common"
+)
+
+// IOFS adopts afero.Fs to stdlib io/fs.FS
+type IOFS struct {
+ Fs
+}
+
+func NewIOFS(fs Fs) IOFS {
+ return IOFS{Fs: fs}
+}
+
+var (
+ _ fs.FS = IOFS{}
+ _ fs.GlobFS = IOFS{}
+ _ fs.ReadDirFS = IOFS{}
+ _ fs.ReadFileFS = IOFS{}
+ _ fs.StatFS = IOFS{}
+ _ fs.SubFS = IOFS{}
+)
+
+func (iofs IOFS) Open(name string) (fs.File, error) {
+ const op = "open"
+
+ // by convention for fs.FS implementations we should perform this check
+ if !fs.ValidPath(name) {
+ return nil, iofs.wrapError(op, name, fs.ErrInvalid)
+ }
+
+ file, err := iofs.Fs.Open(name)
+ if err != nil {
+ return nil, iofs.wrapError(op, name, err)
+ }
+
+ // file should implement fs.ReadDirFile
+ if _, ok := file.(fs.ReadDirFile); !ok {
+ file = readDirFile{file}
+ }
+
+ return file, nil
+}
+
+func (iofs IOFS) Glob(pattern string) ([]string, error) {
+ const op = "glob"
+
+ // afero.Glob does not perform this check but it's required for implementations
+ if _, err := path.Match(pattern, ""); err != nil {
+ return nil, iofs.wrapError(op, pattern, err)
+ }
+
+ items, err := Glob(iofs.Fs, pattern)
+ if err != nil {
+ return nil, iofs.wrapError(op, pattern, err)
+ }
+
+ return items, nil
+}
+
+func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
+ f, err := iofs.Fs.Open(name)
+ if err != nil {
+ return nil, iofs.wrapError("readdir", name, err)
+ }
+
+ defer f.Close()
+
+ if rdf, ok := f.(fs.ReadDirFile); ok {
+ items, err := rdf.ReadDir(-1)
+ if err != nil {
+ return nil, iofs.wrapError("readdir", name, err)
+ }
+ sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() })
+ return items, nil
+ }
+
+ items, err := f.Readdir(-1)
+ if err != nil {
+ return nil, iofs.wrapError("readdir", name, err)
+ }
+ sort.Sort(byName(items))
+
+ ret := make([]fs.DirEntry, len(items))
+ for i := range items {
+ ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
+ }
+
+ return ret, nil
+}
+
+func (iofs IOFS) ReadFile(name string) ([]byte, error) {
+ const op = "readfile"
+
+ if !fs.ValidPath(name) {
+ return nil, iofs.wrapError(op, name, fs.ErrInvalid)
+ }
+
+ bytes, err := ReadFile(iofs.Fs, name)
+ if err != nil {
+ return nil, iofs.wrapError(op, name, err)
+ }
+
+ return bytes, nil
+}
+
+func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil }
+
+func (IOFS) wrapError(op, path string, err error) error {
+ if _, ok := err.(*fs.PathError); ok {
+ return err // don't need to wrap again
+ }
+
+ return &fs.PathError{
+ Op: op,
+ Path: path,
+ Err: err,
+ }
+}
+
+// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open
+type readDirFile struct {
+ File
+}
+
+var _ fs.ReadDirFile = readDirFile{}
+
+func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ items, err := r.File.Readdir(n)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]fs.DirEntry, len(items))
+ for i := range items {
+ ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
+ }
+
+ return ret, nil
+}
+
+// FromIOFS adopts io/fs.FS to use it as afero.Fs
+// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission
+// To store modifications you may use afero.CopyOnWriteFs
+type FromIOFS struct {
+ fs.FS
+}
+
+var _ Fs = FromIOFS{}
+
+func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }
+
+func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) }
+
+func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
+ return notImplemented("mkdirall", path)
+}
+
+func (f FromIOFS) Open(name string) (File, error) {
+ file, err := f.FS.Open(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromIOFSFile{File: file, name: name}, nil
+}
+
+func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return f.Open(name)
+}
+
+func (f FromIOFS) Remove(name string) error {
+ return notImplemented("remove", name)
+}
+
+func (f FromIOFS) RemoveAll(path string) error {
+ return notImplemented("removeall", path)
+}
+
+func (f FromIOFS) Rename(oldname, newname string) error {
+ return notImplemented("rename", oldname)
+}
+
+func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) }
+
+func (f FromIOFS) Name() string { return "fromiofs" }
+
+func (f FromIOFS) Chmod(name string, mode os.FileMode) error {
+ return notImplemented("chmod", name)
+}
+
+func (f FromIOFS) Chown(name string, uid, gid int) error {
+ return notImplemented("chown", name)
+}
+
+func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return notImplemented("chtimes", name)
+}
+
+type fromIOFSFile struct {
+ fs.File
+ name string
+}
+
+func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) {
+ readerAt, ok := f.File.(io.ReaderAt)
+ if !ok {
+ return -1, notImplemented("readat", f.name)
+ }
+
+ return readerAt.ReadAt(p, off)
+}
+
+func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) {
+ seeker, ok := f.File.(io.Seeker)
+ if !ok {
+ return -1, notImplemented("seek", f.name)
+ }
+
+ return seeker.Seek(offset, whence)
+}
+
+func (f fromIOFSFile) Write(p []byte) (n int, err error) {
+ return -1, notImplemented("write", f.name)
+}
+
+func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) {
+ return -1, notImplemented("writeat", f.name)
+}
+
+func (f fromIOFSFile) Name() string { return f.name }
+
+func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) {
+ rdfile, ok := f.File.(fs.ReadDirFile)
+ if !ok {
+ return nil, notImplemented("readdir", f.name)
+ }
+
+ entries, err := rdfile.ReadDir(count)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]os.FileInfo, len(entries))
+ for i := range entries {
+ ret[i], err = entries[i].Info()
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ret, nil
+}
+
+func (f fromIOFSFile) Readdirnames(n int) ([]string, error) {
+ rdfile, ok := f.File.(fs.ReadDirFile)
+ if !ok {
+ return nil, notImplemented("readdir", f.name)
+ }
+
+ entries, err := rdfile.ReadDir(n)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := make([]string, len(entries))
+ for i := range entries {
+ ret[i] = entries[i].Name()
+ }
+
+ return ret, nil
+}
+
+func (f fromIOFSFile) Sync() error { return nil }
+
+func (f fromIOFSFile) Truncate(size int64) error {
+ return notImplemented("truncate", f.name)
+}
+
+func (f fromIOFSFile) WriteString(s string) (ret int, err error) {
+ return -1, notImplemented("writestring", f.name)
+}
+
+func notImplemented(op, path string) error {
+ return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission}
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/ioutil.go b/test/integration/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 000000000..386c9cdc2
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,240 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+ return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byName(list))
+ return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+ return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // It's a good but not certain bet that FileInfo will tell us exactly how much to
+ // read, so let's try it but be prepared for the answer to be wrong.
+ var n int64
+
+ if fi, err := f.Stat(); err == nil {
+ // Don't preallocate a huge buffer, just in case.
+ if size := fi.Size(); size < 1e9 {
+ n = size
+ }
+ }
+ // As initial capacity for readAll, use n + a little extra in case Size is zero,
+ // and to avoid another allocation after Read has filled the buffer. The readAll
+ // call will read into its allocated internal buffer cheaply. If the size was
+ // wrong, we'll either waste some space off the end or reallocate as needed, but
+ // in the overwhelmingly common case we'll get it just right.
+ return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, capacity))
+ // If the buffer overflows, we will get bytes.ErrTooLarge.
+ // Return that as an error. Any other panic remains.
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+ err = panicErr
+ } else {
+ panic(e)
+ }
+ }()
+ _, err = buf.ReadFrom(r)
+ return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+ f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var randNum uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextRandom() string {
+	randmu.Lock()
+	r := randNum
+	if r == 0 {
+		// Lazily seed on first use (or after a conflict-triggered reset).
+		r = reseed()
+	}
+	r = r*1664525 + 1013904223 // constants from Numerical Recipes
+	randNum = r
+	randmu.Unlock()
+	// Map into [1e9, 2e9) and drop the leading "1" to produce a
+	// fixed-width nine-digit decimal string.
+	return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir,
+// opens the file for reading and writing, and returns the resulting *os.File.
+// The filename is generated by taking pattern and adding a random
+// string to the end. If pattern includes a "*", the random string
+// replaces the last "*".
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, pattern string) (f File, err error) {
+ return TempFile(a.Fs, dir, pattern)
+}
+
+func TempFile(fs Fs, dir, pattern string) (f File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	// Split the pattern on the last "*": the random string replaces it.
+	// Without a "*" the random string is simply appended to the pattern.
+	var prefix, suffix string
+	if pos := strings.LastIndex(pattern, "*"); pos != -1 {
+		prefix, suffix = pattern[:pos], pattern[pos+1:]
+	} else {
+		prefix = pattern
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextRandom()+suffix)
+		// O_EXCL guarantees this call created the file, so concurrent
+		// callers can never be handed the same file.
+		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			// After repeated collisions, reseed so subsequent names
+			// diverge from the sequence another process may be walking.
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				randNum = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+ return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ try := filepath.Join(dir, prefix+nextRandom())
+ err = fs.Mkdir(try, 0700)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ randNum = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ if err == nil {
+ name = try
+ }
+ break
+ }
+ return
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/lstater.go b/test/integration/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 000000000..89c1bfc0a
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/match.go b/test/integration/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 000000000..7db4b7de6
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			// A literal pattern matches only itself; a missing file
+			// yields no matches rather than an error.
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	// The directory part itself contains meta characters: expand it
+	// recursively, then glob the file part inside every matched dir.
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.ContainsAny(path, "*?[")
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/mem/dir.go b/test/integration/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 000000000..e104013f4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+ Len() int
+ Names() []string
+ Files() []*FileData
+ Add(*FileData)
+ Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+ if d.memDir == nil {
+ d.dir = true
+ d.memDir = &DirMap{}
+ }
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/mem/dirmap.go b/test/integration/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 000000000..03a57ee5b
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int { return len(m) }
+func (m DirMap) Add(f *FileData) { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+ for _, f := range m {
+ files = append(files, f)
+ }
+ sort.Sort(filesSorter(files))
+ return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int { return len(s) }
+func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+ for x := range m {
+ names = append(names, x)
+ }
+ return names
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/mem/file.go b/test/integration/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 000000000..3cf4693b5
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,356 @@
+// Copyright © 2015 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/spf13/afero/internal/common"
+)
+
+const FilePathSeparator = string(filepath.Separator)
+
+var _ fs.ReadDirFile = &File{}
+
+type File struct {
+ // atomic requires 64-bit alignment for struct field access
+ at int64
+ readDirCount int64
+ closed bool
+ readOnly bool
+ fileData *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+ return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+ return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+ return f.fileData
+}
+
+type FileData struct {
+ sync.Mutex
+ name string
+ data []byte
+ memDir Dir
+ dir bool
+ mode os.FileMode
+ modtime time.Time
+ uid int
+ gid int
+}
+
+func (d *FileData) Name() string {
+ d.Lock()
+ defer d.Unlock()
+ return d.name
+}
+
+func CreateFile(name string) *FileData {
+ return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+ return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+ f.Lock()
+ f.name = newname
+ f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+ f.Lock()
+ f.mode = mode
+ f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+ f.Lock()
+ setModTime(f, mtime)
+ f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+ f.modtime = mtime
+}
+
+func SetUID(f *FileData, uid int) {
+ f.Lock()
+ f.uid = uid
+ f.Unlock()
+}
+
+func SetGID(f *FileData, gid int) {
+ f.Lock()
+ f.gid = gid
+ f.Unlock()
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+ return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+ atomic.StoreInt64(&f.at, 0)
+ atomic.StoreInt64(&f.readDirCount, 0)
+ f.fileData.Lock()
+ f.closed = false
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Close() error {
+ f.fileData.Lock()
+ f.closed = true
+ if !f.readOnly {
+ setModTime(f.fileData, time.Now())
+ }
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Name() string {
+ return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+// Readdir lists the directory's entries. A positive count returns at
+// most count entries and io.EOF once the listing is exhausted; a
+// non-positive count returns all remaining entries with no EOF.
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+	if !f.fileData.dir {
+		return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
+	}
+	var outLength int64
+
+	f.fileData.Lock()
+	// readDirCount is this handle's cursor into the sorted directory
+	// contents, advanced by each successive Readdir call.
+	files := f.fileData.memDir.Files()[f.readDirCount:]
+	if count > 0 {
+		if len(files) < count {
+			outLength = int64(len(files))
+		} else {
+			outLength = int64(count)
+		}
+		if len(files) == 0 {
+			err = io.EOF
+		}
+	} else {
+		outLength = int64(len(files))
+	}
+	f.readDirCount += outLength
+	f.fileData.Unlock()
+
+	res = make([]os.FileInfo, outLength)
+	for i := range res {
+		res[i] = &FileInfo{files[i]}
+	}
+
+	return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ fi, err := f.Readdir(n)
+ names = make([]string, len(fi))
+ for i, f := range fi {
+ _, names[i] = filepath.Split(f.Name())
+ }
+ return names, err
+}
+
+// Implements fs.ReadDirFile
+func (f *File) ReadDir(n int) ([]fs.DirEntry, error) {
+ fi, err := f.Readdir(n)
+ if err != nil {
+ return nil, err
+ }
+ di := make([]fs.DirEntry, len(fi))
+ for i, f := range fi {
+ di[i] = common.FileInfoDirEntry{FileInfo: f}
+ }
+ return di, nil
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if f.closed {
+ return 0, ErrFileClosed
+ }
+ if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+ return 0, io.EOF
+ }
+ if int(f.at) > len(f.fileData.data) {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if len(f.fileData.data)-int(f.at) >= len(b) {
+ n = len(b)
+ } else {
+ n = len(f.fileData.data) - int(f.at)
+ }
+ copy(b, f.fileData.data[f.at:f.at+int64(n)])
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ prev := atomic.LoadInt64(&f.at)
+ atomic.StoreInt64(&f.at, off)
+ n, err = f.Read(b)
+ atomic.StoreInt64(&f.at, prev)
+ return
+}
+
+func (f *File) Truncate(size int64) error {
+ if f.closed {
+ return ErrFileClosed
+ }
+ if f.readOnly {
+ return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ if size < 0 {
+ return ErrOutOfRange
+ }
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if size > int64(len(f.fileData.data)) {
+ diff := size - int64(len(f.fileData.data))
+ f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+ } else {
+ f.fileData.data = f.fileData.data[0:size]
+ }
+ setModTime(f.fileData, time.Now())
+ return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ if f.closed {
+ return 0, ErrFileClosed
+ }
+ switch whence {
+ case io.SeekStart:
+ atomic.StoreInt64(&f.at, offset)
+ case io.SeekCurrent:
+ atomic.AddInt64(&f.at, offset)
+ case io.SeekEnd:
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+ }
+ return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+	if f.closed {
+		return 0, ErrFileClosed
+	}
+	if f.readOnly {
+		return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+	}
+	n = len(b)
+	cur := atomic.LoadInt64(&f.at)
+	f.fileData.Lock()
+	defer f.fileData.Unlock()
+	// diff > 0 means the write offset is past the current end of the
+	// data; the gap is filled with zero bytes below.
+	diff := cur - int64(len(f.fileData.data))
+	var tail []byte
+	if n+int(cur) < len(f.fileData.data) {
+		// Preserve existing bytes beyond the region being overwritten.
+		tail = f.fileData.data[n+int(cur):]
+	}
+	if diff > 0 {
+		f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	} else {
+		f.fileData.data = append(f.fileData.data[:cur], b...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	}
+	setModTime(f.fileData, time.Now())
+
+	atomic.AddInt64(&f.at, int64(n))
+	return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+ return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+ *FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+ s.Lock()
+ _, name := filepath.Split(s.name)
+ s.Unlock()
+ return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+ s.Lock()
+ defer s.Unlock()
+ return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+ if s.IsDir() {
+ return int64(42)
+ }
+ s.Lock()
+ defer s.Unlock()
+ return int64(len(s.data))
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("out of range")
+ ErrTooLarge = errors.New("too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/test/integration/vendor/github.com/spf13/afero/memmap.go b/test/integration/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 000000000..ea0798d87
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,404 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/afero/mem"
+)
+
+const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()
+
+type MemMapFs struct {
+ mu sync.RWMutex
+ data map[string]*mem.FileData
+ init sync.Once
+}
+
+func NewMemMapFs() Fs {
+ return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+ m.init.Do(func() {
+ m.data = make(map[string]*mem.FileData)
+ // Root should always exist, right?
+ // TODO: what about windows?
+ root := mem.CreateDir(FilePathSeparator)
+ mem.SetMode(root, os.ModeDir|0755)
+ m.data[FilePathSeparator] = root
+ })
+ return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+ name = normalizePath(name)
+ m.mu.Lock()
+ file := mem.CreateFile(name)
+ m.getData()[name] = file
+ m.registerWithParent(file, 0)
+ m.mu.Unlock()
+ return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+ f, err := m.lockfreeOpen(fileName)
+ if err != nil {
+ return err
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ log.Panic("parent of ", f.Name(), " is nil")
+ }
+
+ parent.Lock()
+ mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
+ return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+ pdir, _ := filepath.Split(f.Name())
+ pdir = filepath.Clean(pdir)
+ pfile, err := m.lockfreeOpen(pdir)
+ if err != nil {
+ return nil
+ }
+ return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
+ if f == nil {
+ return
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ pdir := filepath.Dir(filepath.Clean(f.Name()))
+ err := m.lockfreeMkdir(pdir, perm)
+ if err != nil {
+ //log.Println("Mkdir error:", err)
+ return
+ }
+ parent, err = m.lockfreeOpen(pdir)
+ if err != nil {
+ //log.Println("Open after Mkdir error:", err)
+ return
+ }
+ }
+
+ parent.Lock()
+ mem.InitializeDir(parent)
+ mem.AddToMemDir(parent, f)
+ parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+ x, ok := m.getData()[name]
+ if ok {
+ // Only return ErrFileExists if it's a file, not a directory.
+ i := mem.FileInfo{FileData: x}
+ if !i.IsDir() {
+ return ErrFileExists
+ }
+ } else {
+ item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
+ m.getData()[name] = item
+ m.registerWithParent(item, perm)
+ }
+ return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ perm &= chmodBits
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ _, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if ok {
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
+ m.getData()[name] = item
+ m.registerWithParent(item, perm)
+ m.mu.Unlock()
+
+ return m.setFileMode(name, perm|os.ModeDir)
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+ err := m.Mkdir(path, perm)
+ if err != nil {
+ if err.(*os.PathError).Err == ErrFileExists {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+ path = filepath.Clean(path)
+
+ switch path {
+ case ".":
+ return FilePathSeparator
+ case "..":
+ return FilePathSeparator
+ default:
+ return path
+ }
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewReadOnlyFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+ }
+ return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+ f, ok := m.getData()[name]
+ if ok {
+ return f, nil
+ } else {
+ return nil, ErrFileNotFound
+ }
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	perm &= chmodBits
+	chmod := false
+	file, err := m.openWrite(name)
+	if err == nil && (flag&os.O_EXCL > 0) {
+		// O_EXCL requires that the file did not already exist.
+		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
+	}
+	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+		file, err = m.Create(name)
+		// perm is applied only to files created by this call.
+		chmod = true
+	}
+	if err != nil {
+		return nil, err
+	}
+	if flag == os.O_RDONLY {
+		// Exactly O_RDONLY (no other flags): hand back a read-only view.
+		file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+	}
+	if flag&os.O_APPEND > 0 {
+		_, err = file.Seek(0, os.SEEK_END)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+		err = file.Truncate(0)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if chmod {
+		return file, m.setFileMode(name, perm)
+	}
+	return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+ name = normalizePath(name)
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if _, ok := m.getData()[name]; ok {
+ err := m.unRegisterWithParent(name)
+ if err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ delete(m.getData(), name)
+ } else {
+ return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+ }
+ return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+	path = normalizePath(path)
+	m.mu.Lock()
+	// Best effort: the error is deliberately ignored because RemoveAll
+	// never reports a missing path as an error (this function always
+	// returns nil).
+	m.unRegisterWithParent(path)
+	m.mu.Unlock()
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	for p := range m.getData() {
+		if p == path || strings.HasPrefix(p, path+FilePathSeparator) {
+			// Temporarily upgrade to the write lock for each deletion,
+			// then drop back so the range can continue under RLock.
+			m.mu.RUnlock()
+			m.mu.Lock()
+			delete(m.getData(), p)
+			m.mu.Unlock()
+			m.mu.RLock()
+		}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+	oldname = normalizePath(oldname)
+	newname = normalizePath(newname)
+
+	if oldname == newname {
+		return nil
+	}
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	if _, ok := m.getData()[oldname]; ok {
+		// Upgrade to the write lock for the move itself; the RLock is
+		// re-taken afterwards so the deferred RUnlock stays balanced.
+		m.mu.RUnlock()
+		m.mu.Lock()
+		m.unRegisterWithParent(oldname)
+		fileData := m.getData()[oldname]
+		delete(m.getData(), oldname)
+		mem.ChangeFileName(fileData, newname)
+		m.getData()[newname] = fileData
+		m.registerWithParent(fileData, 0)
+		m.mu.Unlock()
+		m.mu.RLock()
+	} else {
+		return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+	}
+	return nil
+}
+
+func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fileInfo, err := m.Stat(name)
+ return fileInfo, false, err
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+ f, err := m.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi := mem.GetFileInfo(f.(*mem.File).Data())
+ return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ mode &= chmodBits
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+ prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits
+
+ mode = prevOtherBits | mode
+ return m.setFileMode(name, mode)
+}
+
+func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetMode(f, mode)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) Chown(name string, uid, gid int) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound}
+ }
+
+ mem.SetUID(f, uid)
+ mem.SetGID(f, gid)
+
+ return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetModTime(f, mtime)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) List() {
+ for _, x := range m.data {
+ y := mem.FileInfo{FileData: x}
+ fmt.Println(x.Name(), y.Size())
+ }
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/os.go b/test/integration/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 000000000..f1366321e
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,113 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "time"
+)
+
+var _ Lstater = (*OsFs)(nil)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details in any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+ return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+ f, e := os.Create(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+ return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+ f, e := os.Open(name)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, e := os.OpenFile(name, flag, perm)
+ if f == nil {
+ // while this looks strange, we need to return a bare nil (of type nil) not
+ // a nil value of type *os.File or nil won't be nil
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Remove(name string) error {
+ return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+ return os.Chmod(name, mode)
+}
+
+func (OsFs) Chown(name string, uid, gid int) error {
+ return os.Chown(name, uid, gid)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
+
+func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fi, err := os.Lstat(name)
+ return fi, true, err
+}
+
+func (OsFs) SymlinkIfPossible(oldname, newname string) error {
+ return os.Symlink(oldname, newname)
+}
+
+func (OsFs) ReadlinkIfPossible(name string) (string, error) {
+ return os.Readlink(name)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/path.go b/test/integration/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 000000000..18f60a0f6
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,106 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(fs, path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := lstatIfPossible(fs, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walk(fs, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// if the filesystem supports it, use Lstat, else use fs.Stat
+func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
+ if lfs, ok := fs.(Lstater); ok {
+ fi, _, err := lfs.LstatIfPossible(path)
+ return fi, err
+ }
+ return fs.Stat(path)
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+ return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+ info, err := lstatIfPossible(fs, root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(fs, root, info, walkFn)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/readonlyfs.go b/test/integration/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 000000000..bd8f9264d
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,96 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+ source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+ return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+ return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chown(n string, uid, gid int) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+ return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+ return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ if lsf, ok := r.source.(Lstater); ok {
+ return lsf.LstatIfPossible(name)
+ }
+ fi, err := r.Stat(name)
+ return fi, false, err
+}
+
+func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
+ if srdr, ok := r.source.(LinkReader); ok {
+ return srdr.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ return nil, syscall.EPERM
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+ return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+ return nil, syscall.EPERM
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/regexpfs.go b/test/integration/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 000000000..ac359c62a
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,224 @@
+package afero
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed, all others get a ENOENT error (
+// "No such file or directory").
+//
+type RegexpFs struct {
+ re *regexp.Regexp
+ source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+ return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+ f File
+ re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+ if r.re == nil {
+ return nil
+ }
+ if r.re.MatchString(name) {
+ return nil
+ }
+ return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Chown(name string, uid, gid int) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chown(name, uid, gid)
+}
+
+func (r *RegexpFs) Name() string {
+ return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+ dir, err := IsDir(r.source, oldname)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ if err := r.matchesName(oldname); err != nil {
+ return err
+ }
+ if err := r.matchesName(newname); err != nil {
+ return err
+ }
+ return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+ dir, err := IsDir(r.source, p)
+ if err != nil {
+ return err
+ }
+ if !dir {
+ if err := r.matchesName(p); err != nil {
+ return err
+ }
+ }
+ return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ }
+ f, err := r.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+ return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+ return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+ return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+ return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+ return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+ return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+ return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+ return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+ return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+ var rfi []os.FileInfo
+ rfi, err = f.f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range rfi {
+ if i.IsDir() || f.re.MatchString(i.Name()) {
+ fi = append(fi, i)
+ }
+ }
+ return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+ fi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range fi {
+ n = append(n, s.Name())
+ }
+ return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+ return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+ return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+ return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+ return f.f.WriteString(s)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/symlink.go b/test/integration/vendor/github.com/spf13/afero/symlink.go
new file mode 100644
index 000000000..d1c6ea53d
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/symlink.go
@@ -0,0 +1,55 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+)
+
+// Symlinker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It indicates support for 3 symlink related interfaces that implement the
+// behaviors of the os methods:
+// - Lstat
+// - Symlink, and
+// - Readlink
+type Symlinker interface {
+ Lstater
+ Linker
+ LinkReader
+}
+
+// Linker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem,
+// or the filesystem otherwise supports Symlink's.
+type Linker interface {
+ SymlinkIfPossible(oldname, newname string) error
+}
+
+// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
+// does not support Symlink's either directly or through its delegated filesystem.
+// As expressed by support for the Linker interface.
+var ErrNoSymlink = errors.New("symlink not supported")
+
+// LinkReader is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+type LinkReader interface {
+ ReadlinkIfPossible(name string) (string, error)
+}
+
+// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system
+// does not support the readlink operation either directly or through its delegated filesystem.
+// As expressed by support for the LinkReader interface.
+var ErrNoReadlink = errors.New("readlink not supported")
diff --git a/test/integration/vendor/github.com/spf13/afero/unionFile.go b/test/integration/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 000000000..333d367f4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,331 @@
+package afero
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory present at least in the overlay or opening a file
+// for writing.
+//
+// The calls to
+// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
+// base and the overlay - for files present in both layers, only those
+// from the overlay will be used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags)
+// the operations will be done in both layers, starting with the overlay. A
+// successful read in the overlay will move the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+ Base File
+ Layer File
+ Merger DirsMerger
+ off int
+ files []os.FileInfo
+}
+
+func (f *UnionFile) Close() error {
+ // first close base, so we have a newer timestamp in the overlay. If we'd close
+ // the overlay first, we'd get a cacheStale the next time we access this file
+ // -> cache would be useless ;-)
+ if f.Base != nil {
+ f.Base.Close()
+ }
+ if f.Layer != nil {
+ return f.Layer.Close()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.Read(s)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ // advance the file position also in the base file, the next
+ // call may be a write at this position (or a seek with SEEK_CUR)
+ if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+ // only overwrite err in case the seek fails: we need to
+ // report an eventual io.EOF to the caller
+ err = seekErr
+ }
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Read(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.ReadAt(s, o)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o+int64(n), io.SeekStart)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.ReadAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+ if f.Layer != nil {
+ pos, err = f.Layer.Seek(o, w)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o, w)
+ }
+ return pos, err
+ }
+ if f.Base != nil {
+ return f.Base.Seek(o, w)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.Write(s)
+ if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+ _, err = f.Base.Write(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Write(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteAt(s, o)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteAt(s, o)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+ if f.Layer != nil {
+ return f.Layer.Name()
+ }
+ return f.Base.Name()
+}
+
+// DirsMerger is how UnionFile weaves two directories together.
+// It takes the FileInfo slices from the layer and the base and returns a
+// single view.
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+ var files = make(map[string]os.FileInfo)
+
+ for _, fi := range lofi {
+ files[fi.Name()] = fi
+ }
+
+ for _, fi := range bofi {
+ if _, exists := files[fi.Name()]; !exists {
+ files[fi.Name()] = fi
+ }
+ }
+
+ rfi := make([]os.FileInfo, len(files))
+
+ i := 0
+ for _, fi := range files {
+ rfi[i] = fi
+ i++
+ }
+
+ return rfi, nil
+
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories.
+// At the end of the directory view, the error is io.EOF if c > 0.
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+ var merge DirsMerger = f.Merger
+ if merge == nil {
+ merge = defaultUnionMergeDirsFn
+ }
+
+ if f.off == 0 {
+ var lfi []os.FileInfo
+ if f.Layer != nil {
+ lfi, err = f.Layer.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var bfi []os.FileInfo
+ if f.Base != nil {
+ bfi, err = f.Base.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ merged, err := merge(lfi, bfi)
+ if err != nil {
+ return nil, err
+ }
+ f.files = append(f.files, merged...)
+ }
+ files := f.files[f.off:]
+
+ if c <= 0 {
+ return files, nil
+ }
+
+ if len(files) == 0 {
+ return nil, io.EOF
+ }
+
+ if c > len(files) {
+ c = len(files)
+ }
+
+ defer func() { f.off += c }()
+ return files[:c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+ rfi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, fi := range rfi {
+ names = append(names, fi.Name())
+ }
+ return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+ if f.Layer != nil {
+ return f.Layer.Stat()
+ }
+ if f.Base != nil {
+ return f.Base.Stat()
+ }
+ return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Sync()
+ if err == nil && f.Base != nil {
+ err = f.Base.Sync()
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Sync()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Truncate(s)
+ if err == nil && f.Base != nil {
+ err = f.Base.Truncate(s)
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Truncate(s)
+ }
+ return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteString(s)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteString(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteString(s)
+ }
+ return 0, BADFD
+}
+
+func copyFile(base Fs, layer Fs, name string, bfh File) error {
+ // First make sure the directory exists
+ exists, err := Exists(layer, filepath.Dir(name))
+ if err != nil {
+ return err
+ }
+ if !exists {
+ err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the file on the overlay
+ lfh, err := layer.Create(name)
+ if err != nil {
+ return err
+ }
+ n, err := io.Copy(lfh, bfh)
+ if err != nil {
+ // If anything fails, clean up the file
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+
+ bfi, err := bfh.Stat()
+ if err != nil || bfi.Size() != n {
+ layer.Remove(name)
+ lfh.Close()
+ return syscall.EIO
+ }
+
+ err = lfh.Close()
+ if err != nil {
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+ return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+ bfh, err := base.Open(name)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ return copyFile(base, layer, name, bfh)
+}
+
+func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error {
+ bfh, err := base.OpenFile(name, flag, perm)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ return copyFile(base, layer, name, bfh)
+}
diff --git a/test/integration/vendor/github.com/spf13/afero/util.go b/test/integration/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 000000000..cb7de23f2
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/text/runes"
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+// Filepath separator defined by os.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// Takes a reader and a path and writes the content
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+ return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ if err != os.ErrExist {
+ return err
+ }
+ }
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+// Same as WriteReader but checks to see if file/directory already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+ return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+ if err != nil {
+ return
+ }
+ }
+
+ exists, err := Exists(fs, path)
+ if err != nil {
+ return
+ }
+ if exists {
+ return fmt.Errorf("%v already exists", path)
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+ return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with trailing slash
+// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx
+func GetTempDir(fs Fs, subPath string) string {
+ addSlash := func(p string) string {
+ if FilePathSeparator != p[len(p)-1:] {
+ p = p + FilePathSeparator
+ }
+ return p
+ }
+ dir := addSlash(os.TempDir())
+
+ if subPath != "" {
+ // preserve windows backslash :-(
+ if FilePathSeparator == "\\" {
+ subPath = strings.Replace(subPath, "\\", "____", -1)
+ }
+ dir = dir + UnicodeSanitize((subPath))
+ if FilePathSeparator == "\\" {
+ dir = strings.Replace(dir, "____", "\\", -1)
+ }
+
+ if exists, _ := Exists(fs, dir); exists {
+ return addSlash(dir)
+ }
+
+ err := fs.MkdirAll(dir, 0777)
+ if err != nil {
+ panic(err)
+ }
+ dir = addSlash(dir)
+ }
+ return dir
+}
+
+// Rewrite string to remove non-standard path characters
+func UnicodeSanitize(s string) string {
+ source := []rune(s)
+ target := make([]rune, 0, len(source))
+
+ for _, r := range source {
+ if unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsMark(r) ||
+ r == '.' ||
+ r == '/' ||
+ r == '\\' ||
+ r == '_' ||
+ r == '-' ||
+ r == '%' ||
+ r == ' ' ||
+ r == '#' {
+ target = append(target, r)
+ }
+ }
+
+ return string(target)
+}
+
+// Transform characters with accents into plain forms.
+func NeuterAccents(s string) string {
+ t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
+ result, _, _ := transform.String(t, string(s))
+
+ return result
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+ return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+ return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslices...), nil
+}
+
+// readerContains reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+ if r == nil || len(subslices) == 0 {
+ return false
+ }
+
+ largestSlice := 0
+
+ for _, sl := range subslices {
+ if len(sl) > largestSlice {
+ largestSlice = len(sl)
+ }
+ }
+
+ if largestSlice == 0 {
+ return false
+ }
+
+ bufflen := largestSlice * 4
+ halflen := bufflen / 2
+ buff := make([]byte, bufflen)
+ var err error
+ var n, i int
+
+ for {
+ i++
+ if i == 1 {
+ n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+ } else {
+ if i != 2 {
+ // shift left to catch overlapping matches
+ copy(buff[:], buff[halflen:])
+ }
+ n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+ }
+
+ if n > 0 {
+ for _, sl := range subslices {
+ if bytes.Contains(buff, sl) {
+ return true
+ }
+ }
+ }
+
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+ return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err == nil && fi.IsDir() {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+ return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+ return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+ if b, _ := Exists(fs, path); !b {
+ return false, fmt.Errorf("%q path does not exist", path)
+ }
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ if fi.IsDir() {
+ f, err := fs.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+ list, err := f.Readdir(-1)
+ if err != nil {
+ return false, err
+ }
+ return len(list) == 0, nil
+ }
+ return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+ return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+ combinedPath := filepath.Join(basePathFs.path, relativePath)
+ if parent, ok := basePathFs.source.(*BasePathFs); ok {
+ return FullBaseFsPath(parent, combinedPath)
+ }
+
+ return combinedPath
+}
diff --git a/test/integration/vendor/github.com/spf13/cast/.gitignore b/test/integration/vendor/github.com/spf13/cast/.gitignore
new file mode 100644
index 000000000..53053a8ac
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.bench
diff --git a/test/integration/vendor/github.com/spf13/cast/LICENSE b/test/integration/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/spf13/cast/Makefile b/test/integration/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 000000000..f01a5dbb6
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,40 @@
+GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2)
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ifeq "$(GOVERSION)" "12"
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+endif
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/test/integration/vendor/github.com/spf13/cast/README.md b/test/integration/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 000000000..120a57342
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[](https://godoc.org/github.com/spf13/cast)
+[](https://github.com/spf13/cast/actions/workflows/go.yml)
+[](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempts to guess what you meant,
+for example you can only convert a string to an int when it is a string
+representation of an int such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for meta data.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you if it successfully
+converted. Using these methods you can tell the difference between when the
+input matched the zero value or when the conversion failed and the zero value
+was returned.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+ cast.ToString("mayonegg") // "mayonegg"
+ cast.ToString(8) // "8"
+ cast.ToString(8.31) // "8.31"
+ cast.ToString([]byte("one time")) // "one time"
+ cast.ToString(nil) // ""
+
+ var foo interface{} = "one more time"
+ cast.ToString(foo) // "one more time"
+
+
+### Example ‘ToInt’:
+
+ cast.ToInt(8) // 8
+ cast.ToInt(8.31) // 8
+ cast.ToInt("8") // 8
+ cast.ToInt(true) // 1
+ cast.ToInt(false) // 0
+
+ var eight interface{} = 8
+ cast.ToInt(eight) // 8
+ cast.ToInt(nil) // 0
+
diff --git a/test/integration/vendor/github.com/spf13/cast/cast.go b/test/integration/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..0cfe9418d
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,176 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time {
+ v, _ := ToTimeInDefaultLocationE(i, location)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+ v, _ := ToStringMapIntE(i)
+ return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+ v, _ := ToStringMapInt64E(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/test/integration/vendor/github.com/spf13/cast/caste.go b/test/integration/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..514d759bf
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1476 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "html/template"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+ return ToTimeInDefaultLocationE(i, time.UTC)
+}
+
+// ToTimeInDefaultLocationE casts an empty interface to time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) {
+ i = indirect(i)
+
+ switch v := i.(type) {
+ case time.Time:
+ return v, nil
+ case string:
+ return StringToDateInDefaultLocation(v, location)
+ case json.Number:
+ s, err1 := ToInt64E(v)
+ if err1 != nil {
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+ return time.Unix(s, 0), nil
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
+ default:
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case time.Duration:
+ return s, nil
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+ d = time.Duration(ToInt64(s))
+ return
+ case float32, float64:
+ d = time.Duration(ToFloat64(s))
+ return
+ case string:
+ if strings.ContainsAny(s, "nsuµmh") {
+ d, err = time.ParseDuration(s)
+ } else {
+ d, err = time.ParseDuration(s + "ns")
+ }
+ return
+ case json.Number:
+ var v float64
+ v, err = s.Float64()
+ d = time.Duration(v)
+ return
+ default:
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+ return
+ }
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+ i = indirect(i)
+
+ switch b := i.(type) {
+ case bool:
+ return b, nil
+ case nil:
+ return false, nil
+ case int:
+ if i.(int) != 0 {
+ return true, nil
+ }
+ return false, nil
+ case string:
+ return strconv.ParseBool(i.(string))
+ case json.Number:
+ v, err := ToInt64E(b)
+ if err == nil {
+ return v != 0, nil
+ }
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ default:
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ }
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return float64(intv), nil
+ }
+
+ switch s := i.(type) {
+ case float64:
+ return s, nil
+ case float32:
+ return float64(s), nil
+ case int64:
+ return float64(s), nil
+ case int32:
+ return float64(s), nil
+ case int16:
+ return float64(s), nil
+ case int8:
+ return float64(s), nil
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
+ return float64(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case json.Number:
+ v, err := s.Float64()
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ }
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return float32(intv), nil
+ }
+
+ switch s := i.(type) {
+ case float64:
+ return float32(s), nil
+ case float32:
+ return s, nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case json.Number:
+ v, err := s.Float64()
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return int64(intv), nil
+ }
+
+ switch s := i.(type) {
+ case int64:
+ return s, nil
+ case int32:
+ return int64(s), nil
+ case int16:
+ return int64(s), nil
+ case int8:
+ return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case json.Number:
+ return ToInt64E(string(s))
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return int32(intv), nil
+ }
+
+ switch s := i.(type) {
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
+ case float64:
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case json.Number:
+ return ToInt32E(string(s))
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return int16(intv), nil
+ }
+
+ switch s := i.(type) {
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ case json.Number:
+ return ToInt16E(string(s))
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ }
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return int8(intv), nil
+ }
+
+ switch s := i.(type) {
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case json.Number:
+ return ToInt8E(string(s))
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ return intv, nil
+ }
+
+ switch s := i.(type) {
+ case int64:
+ return int(s), nil
+ case int32:
+ return int(s), nil
+ case int16:
+ return int(s), nil
+ case int8:
+ return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ return int(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case json.Number:
+ return ToIntE(string(s))
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ if intv < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(intv), nil
+ }
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ if v < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ case json.Number:
+ return ToUintE(string(s))
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ if intv < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(intv), nil
+ }
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ if v < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ case json.Number:
+ return ToUint64E(string(s))
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ if intv < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(intv), nil
+ }
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ if v < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ case json.Number:
+ return ToUint32E(string(s))
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ if intv < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(intv), nil
+ }
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ if v < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ case json.Number:
+ return ToUint16E(string(s))
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ intv, ok := toInt(i)
+ if ok {
+ if intv < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(intv), nil
+ }
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+ if err == nil {
+ if v < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ case json.Number:
+ return ToUint8E(string(s))
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ }
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+ // Avoid creating a reflect.Value if it's not a pointer.
+ return a
+ }
+ v := reflect.ValueOf(a)
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error,
+func indirectToStringerOrError(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+
+ var errorType = reflect.TypeOf((*error)(nil)).Elem()
+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+ v := reflect.ValueOf(a)
+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+ i = indirectToStringerOrError(i)
+
+ switch s := i.(type) {
+ case string:
+ return s, nil
+ case bool:
+ return strconv.FormatBool(s), nil
+ case float64:
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+ case int:
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint64:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint32:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint16:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint8:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case json.Number:
+ return s.String(), nil
+ case []byte:
+ return string(s), nil
+ case template.HTML:
+ return string(s), nil
+ case template.URL:
+ return string(s), nil
+ case template.JS:
+ return string(s), nil
+ case template.CSS:
+ return string(s), nil
+ case template.HTMLAttr:
+ return string(s), nil
+ case nil:
+ return "", nil
+ case fmt.Stringer:
+ return s.String(), nil
+ case error:
+ return s.Error(), nil
+ default:
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+ }
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+ var m = map[string]string{}
+
+ switch v := i.(type) {
+ case map[string]string:
+ return v, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+ }
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+ var m = map[string][]string{}
+
+ switch v := i.(type) {
+ case map[string][]string:
+ return v, nil
+ case map[string][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[string]string:
+ for k, val := range v {
+ m[ToString(k)] = []string{val}
+ }
+ case map[string]interface{}:
+ for k, val := range v {
+ switch vt := val.(type) {
+ case []interface{}:
+ m[ToString(k)] = ToStringSlice(vt)
+ case []string:
+ m[ToString(k)] = vt
+ default:
+ m[ToString(k)] = []string{ToString(val)}
+ }
+ }
+ return m, nil
+ case map[interface{}][]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ key, err := ToStringE(k)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ value, err := ToStringSliceE(val)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ m[key] = value
+ }
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+ var m = map[string]bool{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]bool:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+ }
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+ var m = map[string]interface{}{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = val
+ }
+ return m, nil
+ case map[string]interface{}:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+ }
+}
+
+// ToStringMapIntE casts an interface to a map[string]int{} type.
+func ToStringMapIntE(i interface{}) (map[string]int, error) {
+ var m = map[string]int{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt(val)
+ }
+ return m, nil
+ case map[string]int:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToIntE(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToStringMapInt64E casts an interface to a map[string]int64{} type.
+func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
+ var m = map[string]int64{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]int64:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToInt64E(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+ var s []interface{}
+
+ switch v := i.(type) {
+ case []interface{}:
+ return append(s, v...), nil
+ case []map[string]interface{}:
+ for _, u := range v {
+ s = append(s, u)
+ }
+ return s, nil
+ default:
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+ }
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+ if i == nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+
+ switch v := i.(type) {
+ case []bool:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]bool, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToBoolE(s.Index(j).Interface())
+ if err != nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+ var a []string
+
+ switch v := i.(type) {
+ case []interface{}:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []string:
+ return v, nil
+ case []int8:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int32:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []int64:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []float32:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []float64:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case string:
+ return strings.Fields(v), nil
+ case []error:
+ for _, err := range i.([]error) {
+ a = append(a, err.Error())
+ }
+ return a, nil
+ case interface{}:
+ str, err := ToStringE(v)
+ if err != nil {
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+ return []string{str}, nil
+ default:
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+ if i == nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+
+ switch v := i.(type) {
+ case []int:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]int, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToIntE(s.Index(j).Interface())
+ if err != nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+ if i == nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+
+ switch v := i.(type) {
+ case []time.Duration:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]time.Duration, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToDurationE(s.Index(j).Interface())
+ if err != nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+ return parseDateWith(s, time.UTC, timeFormats)
+}
+
+// StringToDateInDefaultLocation casts an empty interface to a time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) {
+ return parseDateWith(s, location, timeFormats)
+}
+
+type timeFormatType int
+
+const (
+ timeFormatNoTimezone timeFormatType = iota
+ timeFormatNamedTimezone
+ timeFormatNumericTimezone
+ timeFormatNumericAndNamedTimezone
+ timeFormatTimeOnly
+)
+
+type timeFormat struct {
+ format string
+ typ timeFormatType
+}
+
+func (f timeFormat) hasTimezone() bool {
+ // We don't include the formats with only named timezones, see
+ // https://github.com/golang/go/issues/19694#issuecomment-289103522
+ return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone
+}
+
+var (
+ timeFormats = []timeFormat{
+ {time.RFC3339, timeFormatNumericTimezone},
+ {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
+ {time.RFC1123Z, timeFormatNumericTimezone},
+ {time.RFC1123, timeFormatNamedTimezone},
+ {time.RFC822Z, timeFormatNumericTimezone},
+ {time.RFC822, timeFormatNamedTimezone},
+ {time.RFC850, timeFormatNamedTimezone},
+ {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
+ {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
+ {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
+ {"2006-01-02 15:04:05", timeFormatNoTimezone},
+ {time.ANSIC, timeFormatNoTimezone},
+ {time.UnixDate, timeFormatNamedTimezone},
+ {time.RubyDate, timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
+ {"2006-01-02", timeFormatNoTimezone},
+ {"02 Jan 2006", timeFormatNoTimezone},
+ {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
+ {time.Kitchen, timeFormatTimeOnly},
+ {time.Stamp, timeFormatTimeOnly},
+ {time.StampMilli, timeFormatTimeOnly},
+ {time.StampMicro, timeFormatTimeOnly},
+ {time.StampNano, timeFormatTimeOnly},
+ }
+)
+
+func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) {
+
+ for _, format := range formats {
+ if d, e = time.Parse(format.format, s); e == nil {
+
+ // Some time formats have a zone name, but no offset, so it gets
+ // put in that zone name (not the default one passed in to us), but
+ // without that zone's offset. So set the location manually.
+ if format.typ <= timeFormatNamedTimezone {
+ if location == nil {
+ location = time.Local
+ }
+ year, month, day := d.Date()
+ hour, min, sec := d.Clock()
+ d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location)
+ }
+
+ return
+ }
+ }
+ return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshall a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v interface{}) error {
+ data := []byte(s)
+ return json.Unmarshal(data, v)
+}
+
+// toInt returns the int value of v if v or v's underlying type
+// is an int.
+// Note that this will return false for int64 etc. types.
+func toInt(v interface{}) (int, bool) {
+ switch v := v.(type) {
+ case int:
+ return v, true
+ case time.Weekday:
+ return int(v), true
+ case time.Month:
+ return int(v), true
+ default:
+ return 0, false
+ }
+}
+
+func trimZeroDecimal(s string) string {
+ var foundZero bool
+ for i := len(s); i > 0; i-- {
+ switch s[i-1] {
+ case '.':
+ if foundZero {
+ return s[:i-1]
+ }
+ case '0':
+ foundZero = true
+ default:
+ return s
+ }
+ }
+ return s
+}
diff --git a/test/integration/vendor/github.com/spf13/cast/timeformattype_string.go b/test/integration/vendor/github.com/spf13/cast/timeformattype_string.go
new file mode 100644
index 000000000..1524fc82c
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/cast/timeformattype_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type timeFormatType"; DO NOT EDIT.
+
+package cast
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[timeFormatNoTimezone-0]
+ _ = x[timeFormatNamedTimezone-1]
+ _ = x[timeFormatNumericTimezone-2]
+ _ = x[timeFormatNumericAndNamedTimezone-3]
+ _ = x[timeFormatTimeOnly-4]
+}
+
+const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly"
+
+var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119}
+
+func (i timeFormatType) String() string {
+ if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) {
+ return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]]
+}
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/.gitignore b/test/integration/vendor/github.com/spf13/jwalterweatherman/.gitignore
new file mode 100644
index 000000000..a71f88af8
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.bench
+go.sum
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/LICENSE b/test/integration/vendor/github.com/spf13/jwalterweatherman/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/README.md b/test/integration/vendor/github.com/spf13/jwalterweatherman/README.md
new file mode 100644
index 000000000..932a23fc6
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -0,0 +1,148 @@
+jWalterWeatherman
+=================
+
+Seamless printing to the terminal (stdout) and logging to a io.Writer
+(file) that’s as easy to use as fmt.Println.
+
+
+Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422)
+
+JWW is primarily a wrapper around the excellent standard log library. It
+provides a few advantages over using the standard log library alone.
+
+1. Ready to go out of the box.
+2. One library for both printing to the terminal and logging (to files).
+3. Really easy to log to either a temp file or a file you specify.
+
+
+I really wanted a very straightforward library that could seamlessly do
+the following things.
+
+1. Replace all the println, printf, etc statements thoughout my code with
+ something more useful
+2. Allow the user to easily control what levels are printed to stdout
+3. Allow the user to easily control what levels are logged
+4. Provide an easy mechanism (like fmt.Println) to print info to the user
+ which can be easily logged as well
+5. Due to 2 & 3 provide easy verbose mode for output and logs
+6. Not have any unnecessary initialization cruft. Just use it.
+
+# Usage
+
+## Step 1. Use it
+Put calls throughout your source based on type of feedback.
+No initialization or setup needs to happen. Just start calling things.
+
+Available Loggers are:
+
+ * TRACE
+ * DEBUG
+ * INFO
+ * WARN
+ * ERROR
+ * CRITICAL
+ * FATAL
+
+These each are loggers based on the log standard library and follow the
+standard usage. Eg.
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ ...
+
+ if err != nil {
+
+ // This is a pretty serious error and the user should know about
+ // it. It will be printed to the terminal as well as logged under the
+ // default thresholds.
+
+ jww.ERROR.Println(err)
+ }
+
+ if err2 != nil {
+ // This error isn’t going to materially change the behavior of the
+ // application, but it’s something that may not be what the user
+ // expects. Under the default thresholds, Warn will be logged, but
+ // not printed to the terminal.
+
+ jww.WARN.Println(err2)
+ }
+
+ // Information that’s relevant to what’s happening, but not very
+ // important for the user. Under the default thresholds this will be
+ // discarded.
+
+ jww.INFO.Printf("information %q", response)
+
+```
+
+NOTE: You can also use the library in a non-global setting by creating an instance of a Notebook:
+
+```go
+notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+notepad.WARN.Println("Some warning"")
+```
+
+_Why 7 levels?_
+
+Maybe you think that 7 levels are too much for any application... and you
+are probably correct. Just because there are seven levels doesn’t mean
+that you should be using all 7 levels. Pick the right set for your needs.
+Remember they only have to mean something to your project.
+
+## Step 2. Optionally configure JWW
+
+Under the default thresholds :
+
+ * Debug, Trace & Info goto /dev/null
+ * Warn and above is logged (when a log file/io.Writer is provided)
+ * Error and above is printed to the terminal (stdout)
+
+### Changing the thresholds
+
+The threshold can be changed at any time, but will only affect calls that
+execute after the change was made.
+
+This is very useful if your application has a verbose mode. Of course you
+can decide what verbose means to you or even have multiple levels of
+verbosity.
+
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ if Verbose {
+ jww.SetLogThreshold(jww.LevelTrace)
+ jww.SetStdoutThreshold(jww.LevelInfo)
+ }
+```
+
+Note that JWW's own internal output uses log levels as well, so set the log
+level before making any other calls if you want to see what it's up to.
+
+
+### Setting a log file
+
+JWW can log to any `io.Writer`:
+
+
+```go
+
+ jww.SetLogOutput(customWriter)
+
+```
+
+
+# More information
+
+This is an early release. I’ve been using it for a while and this is the
+third interface I’ve tried. I like this one pretty well, but no guarantees
+that it won’t change a bit.
+
+I wrote this for use in [hugo](https://gohugo.io). If you are looking
+for a static website engine that’s super fast please checkout Hugo.
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/test/integration/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 000000000..a018c15c4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,111 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+var (
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+ TRACE = defaultNotepad.TRACE
+ DEBUG = defaultNotepad.DEBUG
+ INFO = defaultNotepad.INFO
+ WARN = defaultNotepad.WARN
+ ERROR = defaultNotepad.ERROR
+ CRITICAL = defaultNotepad.CRITICAL
+ FATAL = defaultNotepad.FATAL
+
+ LOG = defaultNotepad.LOG
+ FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+ defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+ reloadDefaultNotepad()
+}
+
+// SetLogThreshold set the log threshold for the default notepad. Trace by default.
+func SetLogThreshold(threshold Threshold) {
+ defaultNotepad.SetLogThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetLogOutput set the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+ defaultNotepad.SetLogOutput(handle)
+ reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold set the standard output threshold for the default notepad.
+// Info by default.
+func SetStdoutThreshold(threshold Threshold) {
+ defaultNotepad.SetStdoutThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetStdoutOutput set the stdout output for the default notepad. Default is stdout.
+func SetStdoutOutput(handle io.Writer) {
+ defaultNotepad.outHandle = handle
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
+// SetPrefix set the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+ defaultNotepad.SetPrefix(prefix)
+ reloadDefaultNotepad()
+}
+
+// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+ defaultNotepad.SetFlags(flags)
+ reloadDefaultNotepad()
+}
+
+// SetLogListeners configures the default logger with one or more log listeners.
+func SetLogListeners(l ...LogListener) {
+ defaultNotepad.logListeners = l
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
+// Level returns the current global log threshold.
+func LogThreshold() Threshold {
+ return defaultNotepad.logThreshold
+}
+
+// Level returns the current global output threshold.
+func StdoutThreshold() Threshold {
+ return defaultNotepad.stdoutThreshold
+}
+
+// GetStdoutThreshold returns the defined Treshold for the log logger.
+func GetLogThreshold() Threshold {
+ return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the Treshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+ return defaultNotepad.GetStdoutThreshold()
+}
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/test/integration/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 000000000..41285f3dc
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,46 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "io"
+ "sync/atomic"
+)
+
+// Counter is an io.Writer that increments a counter on Write.
+type Counter struct {
+ count uint64
+}
+
+func (c *Counter) incr() {
+ atomic.AddUint64(&c.count, 1)
+}
+
+// Reset resets the counter.
+func (c *Counter) Reset() {
+ atomic.StoreUint64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *Counter) Count() uint64 {
+ return atomic.LoadUint64(&c.count)
+}
+
+func (c *Counter) Write(p []byte) (n int, err error) {
+ c.incr()
+ return len(p), nil
+}
+
+// LogCounter creates a LogListener that counts log statements >= the given threshold.
+func LogCounter(counter *Counter, t1 Threshold) LogListener {
+ return func(t2 Threshold) io.Writer {
+ if t2 < t1 {
+ // Not interested in this threshold.
+ return nil
+ }
+ return counter
+ }
+}
diff --git a/test/integration/vendor/github.com/spf13/jwalterweatherman/notepad.go b/test/integration/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 000000000..cc7957bf7
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,225 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+ return prefixes[t]
+}
+
+const (
+ LevelTrace Threshold = iota
+ LevelDebug
+ LevelInfo
+ LevelWarn
+ LevelError
+ LevelCritical
+ LevelFatal
+)
+
+var prefixes map[Threshold]string = map[Threshold]string{
+ LevelTrace: "TRACE",
+ LevelDebug: "DEBUG",
+ LevelInfo: "INFO",
+ LevelWarn: "WARN",
+ LevelError: "ERROR",
+ LevelCritical: "CRITICAL",
+ LevelFatal: "FATAL",
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ loggers [7]**log.Logger
+ logHandle io.Writer
+ outHandle io.Writer
+ logThreshold Threshold
+ stdoutThreshold Threshold
+ prefix string
+ flags int
+
+ logListeners []LogListener
+}
+
+// A LogListener can ble supplied to a Notepad to listen on log writes for a given
+// threshold. This can be used to capture log events in unit tests and similar.
+// Note that this function will be invoked once for each log threshold. If
+// the given threshold is not of interest to you, return nil.
+// Note that these listeners will receive log events for a given threshold, even
+// if the current configuration says not to log it. That way you can count ERRORs even
+// if you don't print them to the console.
+type LogListener func(t Threshold) io.Writer
+
+// NewNotepad creates a new Notepad.
+func NewNotepad(
+ outThreshold Threshold,
+ logThreshold Threshold,
+ outHandle, logHandle io.Writer,
+ prefix string, flags int,
+ logListeners ...LogListener,
+) *Notepad {
+
+ n := &Notepad{logListeners: logListeners}
+
+ n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
+ n.outHandle = outHandle
+ n.logHandle = logHandle
+ n.stdoutThreshold = outThreshold
+ n.logThreshold = logThreshold
+
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+
+ n.flags = flags
+
+ n.LOG = log.New(n.logHandle,
+ "LOG: ",
+ n.flags)
+ n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
+
+ n.init()
+ return n
+}
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+ logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
+
+ for t, logger := range n.loggers {
+ threshold := Threshold(t)
+ prefix := n.prefix + threshold.String() + " "
+
+ switch {
+ case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+ *logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags)
+
+ case threshold >= n.logThreshold:
+ *logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags)
+
+ case threshold >= n.stdoutThreshold:
+ *logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags)
+
+ default:
+ *logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags)
+ }
+ }
+}
+
+func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer {
+ if len(n.logListeners) == 0 {
+ return handle
+ }
+ writers := []io.Writer{handle}
+ for _, l := range n.logListeners {
+ w := l(t)
+ if w != nil {
+ writers = append(writers, w)
+ }
+ }
+
+ if len(writers) == 1 {
+ return handle
+ }
+
+ return io.MultiWriter(writers...)
+}
+
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+ n.logThreshold = threshold
+ n.init()
+}
+
+// SetLogOutput changes the file where log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+ n.logHandle = handle
+ n.init()
+}
+
+// GetStdoutThreshold returns the defined Treshold for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+ return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+ n.stdoutThreshold = threshold
+ n.init()
+}
+
+// GetStdoutThreshold returns the Treshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+ return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+ n.init()
+}
+
+// SetFlags choose which flags the logger will display (after prefix and message
+// level). See the package log for more informations on this.
+func (n *Notepad) SetFlags(flags int) {
+ n.flags = flags
+ n.init()
+}
+
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+ out *log.Logger
+ log *log.Logger
+}
+
+func (fb *Feedback) Println(v ...interface{}) {
+ fb.output(fmt.Sprintln(v...))
+}
+
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+ fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+ fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+ if fb.out != nil {
+ fb.out.Output(2, s)
+ }
+ if fb.log != nil {
+ fb.log.Output(2, s)
+ }
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/.editorconfig b/test/integration/vendor/github.com/spf13/viper/.editorconfig
new file mode 100644
index 000000000..6d0b6d356
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/.editorconfig
@@ -0,0 +1,15 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[{Makefile,*.mk}]
+indent_style = tab
diff --git a/test/integration/vendor/github.com/spf13/viper/.gitignore b/test/integration/vendor/github.com/spf13/viper/.gitignore
new file mode 100644
index 000000000..896250839
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/.gitignore
@@ -0,0 +1,5 @@
+/.idea/
+/bin/
+/build/
+/var/
+/vendor/
diff --git a/test/integration/vendor/github.com/spf13/viper/.golangci.yaml b/test/integration/vendor/github.com/spf13/viper/.golangci.yaml
new file mode 100644
index 000000000..16e039652
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/.golangci.yaml
@@ -0,0 +1,96 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/spf13/viper)
+ golint:
+ min-confidence: 0
+ goimports:
+ local-prefixes: github.com/spf13/viper
+
+linters:
+ disable-all: true
+ enable:
+ - bodyclose
+ - deadcode
+ - dogsled
+ - dupl
+ - durationcheck
+ - exhaustive
+ - exportloopref
+ - gci
+ - gofmt
+ - gofumpt
+ - goimports
+ - gomoddirectives
+ - goprintffuncname
+ - govet
+ - importas
+ - ineffassign
+ - makezero
+ - misspell
+ - nakedret
+ - nilerr
+ - noctx
+ - nolintlint
+ - prealloc
+ - predeclared
+ - revive
+ - rowserrcheck
+ - sqlclosecheck
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - tparallel
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - varcheck
+ - wastedassign
+ - whitespace
+
+ # fixme
+ # - cyclop
+ # - errcheck
+ # - errorlint
+ # - exhaustivestruct
+ # - forbidigo
+ # - forcetypeassert
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - gocognit
+ # - goconst
+ # - gocritic
+ # - gocyclo
+ # - godot
+ # - gosec
+ # - gosimple
+ # - ifshort
+ # - lll
+ # - nlreturn
+ # - paralleltest
+ # - scopelint
+ # - thelper
+ # - wrapcheck
+
+ # unused
+ # - depguard
+ # - goheader
+ # - gomodguard
+
+ # don't enable:
+ # - asciicheck
+ # - funlen
+ # - godox
+ # - goerr113
+ # - gomnd
+ # - interfacer
+ # - maligned
+ # - nestif
+ # - testpackage
+ # - wsl
diff --git a/test/integration/vendor/github.com/spf13/viper/LICENSE b/test/integration/vendor/github.com/spf13/viper/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/test/integration/vendor/github.com/spf13/viper/Makefile b/test/integration/vendor/github.com/spf13/viper/Makefile
new file mode 100644
index 000000000..130c427e8
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/Makefile
@@ -0,0 +1,76 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+OS = $(shell uname | tr A-Z a-z)
+export PATH := $(abspath bin/):${PATH}
+
+# Build variables
+BUILD_DIR ?= build
+export CGO_ENABLED ?= 0
+export GOOS = $(shell go env GOOS)
+ifeq (${VERBOSE}, 1)
+ifeq ($(filter -v,${GOARGS}),)
+ GOARGS += -v
+endif
+TEST_FORMAT = short-verbose
+endif
+
+# Dependency versions
+GOTESTSUM_VERSION = 1.8.0
+GOLANGCI_VERSION = 1.49.0
+
+# Add the ability to override some variables
+# Use with care
+-include override.mk
+
+.PHONY: clear
+clear: ## Clear the working area and the project
+ rm -rf bin/
+
+.PHONY: check
+check: test lint ## Run tests and linters
+
+bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION}
+ @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum
+bin/gotestsum-${GOTESTSUM_VERSION}:
+ @mkdir -p bin
+ curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}
+
+TEST_PKGS ?= ./...
+.PHONY: test
+test: TEST_FORMAT ?= short
+test: SHELL = /bin/bash
+test: export CGO_ENABLED=1
+test: bin/gotestsum ## Run tests
+ @mkdir -p ${BUILD_DIR}
+ bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
+
+bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}
+ @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
+bin/golangci-lint-${GOLANGCI_VERSION}:
+ @mkdir -p bin
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION}
+ @mv bin/golangci-lint "$@"
+
+.PHONY: lint
+lint: bin/golangci-lint ## Run linter
+ bin/golangci-lint run
+
+.PHONY: fix
+fix: bin/golangci-lint ## Fix lint violations
+ bin/golangci-lint run --fix
+
+# Add custom targets here
+-include custom.mk
+
+.PHONY: list
+list: ## List all make targets
+ @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
+
+.PHONY: help
+.DEFAULT_GOAL := help
+help:
+ @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+# Variable outputting/exporting rules
+var-%: ; @echo $($*)
+varexport-%: ; @echo $*=$($*)
diff --git a/test/integration/vendor/github.com/spf13/viper/README.md b/test/integration/vendor/github.com/spf13/viper/README.md
new file mode 100644
index 000000000..63413a7dc
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/README.md
@@ -0,0 +1,881 @@
+> ## Viper v2 feedback
+> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9
+>
+> **Thank you!**
+
+
+
+[](https://github.com/avelino/awesome-go#configuration)
+[](https://repl.it/@sagikazarmark/Viper-example#main.go)
+
+[](https://github.com/spf13/viper/actions?query=workflow%3ACI)
+[](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[](https://goreportcard.com/report/github.com/spf13/viper)
+
+[](https://pkg.go.dev/mod/github.com/spf13/viper)
+
+**Go configuration with fangs!**
+
+Many Go projects are built using Viper including:
+
+* [Hugo](http://gohugo.io)
+* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
+* [Imgur’s Incus](https://github.com/Imgur/incus)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [Docker Notary](https://github.com/docker/Notary)
+* [BloomApi](https://www.bloomapi.com/)
+* [doctl](https://github.com/digitalocean/doctl)
+* [Clairctl](https://github.com/jgsqware/clairctl)
+* [Mercure](https://mercure.rocks)
+
+
+## Install
+
+```shell
+go get github.com/spf13/viper
+```
+
+**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
+
+
+## What is Viper?
+
+Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
+to work within an application, and can handle all types of configuration needs
+and formats. It supports:
+
+* setting defaults
+* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files
+* live watching and re-reading of config files (optional)
+* reading from environment variables
+* reading from remote config systems (etcd or Consul), and watching changes
+* reading from command line flags
+* reading from buffer
+* setting explicit values
+
+Viper can be thought of as a registry for all of your applications configuration needs.
+
+
+## Why Viper?
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats.
+2. Provide a mechanism to set default values for your different configuration options.
+3. Provide a mechanism to set override values for options specified through command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing code.
+5. Make it easy to tell the difference between when a user has provided a command line or config file which is the same as the default.
+
+Viper uses the following precedence order. Each item takes precedence over the item below it:
+
+ * explicit call to `Set`
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
+
+**Important:** Viper configuration keys are case insensitive.
+There are ongoing discussions about making that optional.
+
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it’s useful in the event that a key hasn't been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search paths leaving defaults decision
+to an application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name
+viper.AddConfigPath("/etc/appname/") // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
+viper.AddConfigPath(".") // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("fatal error config file: %w", err))
+}
+```
+
+You can handle the specific case where no config file is found like this:
+
+```go
+if err := viper.ReadInConfig(); err != nil {
+ if _, ok := err.(viper.ConfigFileNotFoundError); ok {
+ // Config file not found; ignore error if desired
+ } else {
+ // Config file was found but another error was produced
+ }
+}
+
+// Config file found and successfully parsed
+```
+
+*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. This is useful for configuration files that lie in the user's home directory without any extension, like `.bashrc`.
+
+### Writing Config Files
+
+Reading from config files is useful, but at times you want to store all modifications made at run time.
+For that, a bunch of commands are available, each with its own purpose:
+
+* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists.
+* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists.
+* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists.
+* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists.
+
+As a rule of thumb, everything marked with safe won't overwrite any file, but just create it if it doesn't exist, whilst the default behavior is to create or truncate.
+
+A small examples section:
+
+```go
+viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName'
+viper.SafeWriteConfig()
+viper.WriteConfigAs("/path/to/my/.config")
+viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written
+viper.SafeWriteConfigAs("/path/to/my/.other_config")
+```
+
+### Watching and re-reading config files
+
+Viper supports the ability to have your application live read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect,
+viper powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the viper instance to watchConfig.
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+viper.OnConfigChange(func(e fsnotify.Event) {
+ fmt.Println("Config file changed:", e.Name)
+})
+viper.WatchConfig()
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V store, but you are not bound to them. You can
+also implement your own required configuration source and feed it to viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to require this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+ jacket: leather
+ trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true) // same result as prior line
+
+viper.GetBool("loud") // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. There are five methods that exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(string...) *strings.Replacer`
+ * `AllowEmptyEnv(bool)`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or more parameters. The first parameter is the key name, the
+rest are the name of the environment variables to bind to this key. If more than
+one are provided, they will take precedence in the specified order. The name of
+the environment variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter),
+it **does not** automatically add the prefix. For example if the second parameter is "id",
+Viper will look for the ENV variable "ID".
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+the `BindEnv` is called.
+
+`AutomaticEnv` is a powerful helper especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made. It will apply the following rules. It will
+check for an environment variable with a name matching the key uppercased and
+prefixed with the `EnvPrefix` if set.
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environmental variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
+
+Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function.
+Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic.
+
+By default empty environment variables are considered unset and will fall back to
+the next configuration source. To treat empty environment variables as set, use
+the `AllowEmptyEnv` method.
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
+
+For individual flags, the `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by a calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+ "flag"
+ "github.com/spf13/pflag"
+)
+
+func main() {
+
+ // using standard library "flag" package
+ flag.Int("flagname", 1234, "help message for flagname")
+
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+ viper.BindPFlags(pflag.CommandLine)
+
+ i := viper.GetInt("flagname") // retrieve value from viper
+
+ // ...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example on how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example on how to implement this interface:
+
+```go
+type myFlagSet struct {
+ flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+ flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path
+in a Key/Value store such as etcd or Consul. These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring. Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+
+```bash
+$ go get github.com/bketelsen/crypt/bin/crypt
+$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
+```
+
+Confirm that your value was set:
+
+```bash
+$ crypt get -plaintext /config/hugo.json
+```
+
+See the `crypt` documentation for examples of how to set encrypted values, or
+how to use Consul.
+
+### Remote Key/Value Store Example - Unencrypted
+
+#### etcd
+```go
+viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+err := viper.ReadRemoteConfig()
+```
+
+#### etcd3
+```go
+viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+err := viper.ReadRemoteConfig()
+```
+
+#### Consul
+You need to set a key to Consul key/value storage with JSON value containing your desired config.
+For example, create a Consul key/value store key `MY_CONSUL_KEY` with value:
+
+```json
+{
+ "port": 8080,
+ "hostname": "myhostname.com"
+}
+```
+
+```go
+viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
+viper.SetConfigType("json") // Need to explicitly set this to json
+err := viper.ReadRemoteConfig()
+
+fmt.Println(viper.Get("port")) // 8080
+fmt.Println(viper.Get("hostname")) // myhostname.com
+```
+
+#### Firestore
+
+```go
+viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
+viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
+err := viper.ReadRemoteConfig()
+```
+
+Of course, you're allowed to use `SecureRemoteProvider` also
+
+### Remote Key/Value Store Example - Encrypted
+
+```go
+viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+err := viper.ReadRemoteConfig()
+```
+
+### Watching Changes in etcd - Unencrypted
+
+```go
+// alternatively, you can create a new viper instance.
+var runtime_viper = viper.New()
+
+runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
+runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
+
+// read from remote config the first time.
+err := runtime_viper.ReadRemoteConfig()
+
+// unmarshal config
+runtime_viper.Unmarshal(&runtime_conf)
+
+// open a goroutine to watch remote changes forever
+go func(){
+ for {
+ time.Sleep(time.Second * 5) // delay after each request
+
+ // currently, only tested with etcd support
+ err := runtime_viper.WatchRemoteConfig()
+ if err != nil {
+ log.Errorf("unable to read remote config: %v", err)
+ continue
+ }
+
+ // unmarshal new config into our runtime config struct. you can also use channel
+ // to implement a signal to notify the system of the changes
+ runtime_viper.Unmarshal(&runtime_conf)
+ }
+}()
+```
+
+## Getting Values From Viper
+
+In Viper, there are a few ways to get a value depending on the value’s type.
+The following functions and methods exist:
+
+ * `Get(key string) : interface{}`
+ * `GetBool(key string) : bool`
+ * `GetFloat64(key string) : float64`
+ * `GetInt(key string) : int`
+ * `GetIntSlice(key string) : []int`
+ * `GetString(key string) : string`
+ * `GetStringMap(key string) : map[string]interface{}`
+ * `GetStringMapString(key string) : map[string]string`
+ * `GetStringSlice(key string) : []string`
+ * `GetTime(key string) : time.Time`
+ * `GetDuration(key string) : time.Duration`
+ * `IsSet(key string) : bool`
+ * `AllSettings() : map[string]interface{}`
+
+One important thing to recognize is that each Get function will return a zero
+value if it’s not found. To check if a given key exists, the `IsSet()` method
+has been provided.
+
+Example:
+```go
+viper.GetString("logfile") // case-insensitive Setting & Getting
+if viper.GetBool("verbose") {
+ fmt.Println("verbose enabled")
+}
+```
+### Accessing nested keys
+
+The accessor methods also accept formatted paths to deeply nested keys. For
+example, if the following JSON file is loaded:
+
+```json
+{
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined, they are “shadowed” by the higher-priority
+configuration level.
+
+Viper can access array indices by using numbers in the path. For example:
+
+```jsonc
+{
+ "host": {
+ "address": "localhost",
+ "ports": [
+ 5799,
+ 6029
+ ]
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+GetInt("host.ports.1") // returns 6029
+
+```
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```jsonc
+{
+ "datastore.metric.host": "0.0.0.0",
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extracting a sub-tree
+
+When developing reusable modules, it's often useful to extract a subset of the configuration
+and pass it to a module. This way the module can be instantiated more than once, with different configurations.
+
+For example, an application might use multiple different cache stores for different purposes:
+
+```yaml
+cache:
+ cache1:
+ max-items: 100
+ item-size: 64
+ cache2:
+ max-items: 200
+ item-size: 80
+```
+
+We could pass the cache name to a module (eg. `NewCache("cache1")`),
+but it would require weird concatenation for accessing config keys and would be less separated from the global config.
+
+So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration:
+
+```go
+cache1Config := viper.Sub("cache.cache1")
+if cache1Config == nil { // Sub returns nil if the key cannot be found
+ panic("cache configuration not found")
+}
+
+cache1 := NewCache(cache1Config)
+```
+
+**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found.
+
+Internally, the `NewCache` function can address `max-items` and `item-size` keys directly:
+
+```go
+func NewCache(v *Viper) *Cache {
+ return &Cache{
+ MaxItems: v.GetInt("max-items"),
+ ItemSize: v.GetInt("item-size"),
+ }
+}
+```
+
+The resulting code is easy to test, since it's decoupled from the main config structure,
+and easier to reuse (for the same reason).
+
+
+### Unmarshaling
+
+You also have the option of Unmarshaling all or a specific value to a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+ Port int
+ Name string
+ PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+If you want to unmarshal configuration where the keys themselves contain dot (the default key delimiter),
+you have to change the delimiter:
+
+```go
+v := viper.NewWithOptions(viper.KeyDelimiter("::"))
+
+v.SetDefault("chart::values", map[string]interface{}{
+ "ingress": map[string]interface{}{
+ "annotations": map[string]interface{}{
+ "traefik.frontend.rule.type": "PathPrefix",
+ "traefik.ingress.kubernetes.io/ssl-redirect": "true",
+ },
+ },
+})
+
+type config struct {
+ Chart struct{
+ Values map[string]interface{}
+ }
+}
+
+var C config
+
+v.Unmarshal(&C)
+```
+
+Viper also supports unmarshaling into embedded structs:
+
+```go
+/*
+Example config:
+
+module:
+ enabled: true
+ token: 89h3f98hbwf987h3f98wenf89ehf
+*/
+type config struct {
+ Module struct {
+ Enabled bool
+
+ moduleConfig `mapstructure:",squash"`
+ }
+}
+
+// moduleConfig could be in a module specific package
+type moduleConfig struct {
+ Token string
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default.
+
+### Decoding custom formats
+
+A frequently requested feature for Viper is adding more value formats and decoders.
+For example, parsing character (dot, comma, semicolon, etc) separated strings into slices.
+
+This is already available in Viper using mapstructure decode hooks.
+
+Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/).
+
+### Marshalling to string
+
+You may need to marshal all the settings held in viper into a string rather than write them to a file.
+You can use your favorite format's marshaller with the config returned by `AllSettings()`.
+
+```go
+import (
+ yaml "gopkg.in/yaml.v2"
+ // ...
+)
+
+func yamlStringSettings() string {
+ c := viper.AllSettings()
+ bs, err := yaml.Marshal(c)
+ if err != nil {
+ log.Fatalf("unable to marshal config to YAML: %v", err)
+ }
+ return string(bs)
+}
+```
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+In all of the examples above, they demonstrate using viper in its singleton
+style approach.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key value store, etc. All of the functions that viper
+package supports are mirrored as methods on a viper.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+
+## Q & A
+
+### Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+### Why is it called “Cobra”?
+
+Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
+
+### Does Viper support case sensitive keys?
+
+**tl;dr:** No.
+
+Viper merges configuration from various sources, many of which are either case insensitive or uses different casing than the rest of the sources (eg. env vars).
+In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive.
+
+There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much.
+
+You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9
+
+### Is it safe to concurrently read and write to a viper?
+
+No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic.
+
+## Troubleshooting
+
+See [TROUBLESHOOTING.md](TROUBLESHOOTING.md).
diff --git a/test/integration/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/test/integration/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
new file mode 100644
index 000000000..c4e36c686
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
@@ -0,0 +1,32 @@
+# Troubleshooting
+
+## Unmarshaling doesn't work
+
+The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
+
+## Cannot find package
+
+Viper installation seems to fail a lot lately with the following (or a similar) error:
+
+```
+cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
+/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
+/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
+```
+
+As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
+Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
+
+The solution is easy: switch to using Go Modules.
+Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that.
+
+**tl;dr:** `export GO111MODULE=on`
+
+## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file
+
+This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740).
+
+Potential solutions are:
+
+1. Quoting values resolved as boolean
+1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build)
diff --git a/test/integration/vendor/github.com/spf13/viper/experimental_logger.go b/test/integration/vendor/github.com/spf13/viper/experimental_logger.go
new file mode 100644
index 000000000..206dad6a0
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/experimental_logger.go
@@ -0,0 +1,11 @@
+//go:build viper_logger
+// +build viper_logger
+
+package viper
+
+// WithLogger sets a custom logger.
+func WithLogger(l Logger) Option {
+ return optionFunc(func(v *Viper) {
+ v.logger = l
+ })
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/flags.go b/test/integration/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 000000000..b5ddbf5d4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+ VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+ HasChanged() bool
+ Name() string
+ ValueString() string
+ ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+ flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+ p.flags.VisitAll(func(flag *pflag.Flag) {
+ fn(pflagValue{flag})
+ })
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue
+type pflagValue struct {
+ flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has changed or not.
+func (p pflagValue) HasChanged() bool {
+ return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+ return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+ return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+ return p.flag.Value.Type()
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/fs.go b/test/integration/vendor/github.com/spf13/viper/fs.go
new file mode 100644
index 000000000..ecb1769e5
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/fs.go
@@ -0,0 +1,65 @@
+//go:build go1.16 && finder
+// +build go1.16,finder
+
+package viper
+
+import (
+ "errors"
+ "io/fs"
+ "path"
+)
+
+type finder struct {
+ paths []string
+ fileNames []string
+ extensions []string
+
+ withoutExtension bool
+}
+
+func (f finder) Find(fsys fs.FS) (string, error) {
+ for _, searchPath := range f.paths {
+ for _, fileName := range f.fileNames {
+ for _, extension := range f.extensions {
+ filePath := path.Join(searchPath, fileName+"."+extension)
+
+ ok, err := fileExists(fsys, filePath)
+ if err != nil {
+ return "", err
+ }
+
+ if ok {
+ return filePath, nil
+ }
+ }
+
+ if f.withoutExtension {
+ filePath := path.Join(searchPath, fileName)
+
+ ok, err := fileExists(fsys, filePath)
+ if err != nil {
+ return "", err
+ }
+
+ if ok {
+ return filePath, nil
+ }
+ }
+ }
+ }
+
+ return "", nil
+}
+
+func fileExists(fsys fs.FS, filePath string) (bool, error) {
+ fileInfo, err := fs.Stat(fsys, filePath)
+ if err == nil {
+ return !fileInfo.IsDir(), nil
+ }
+
+ if errors.Is(err, fs.ErrNotExist) {
+ return false, nil
+ }
+
+ return false, err
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/decoder.go
new file mode 100644
index 000000000..f472e9ff1
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/decoder.go
@@ -0,0 +1,61 @@
+package encoding
+
+import (
+ "sync"
+)
+
+// Decoder decodes the contents of b into v.
+// It's primarily used for decoding contents of a file into a map[string]interface{}.
+type Decoder interface {
+ Decode(b []byte, v map[string]interface{}) error
+}
+
+const (
+ // ErrDecoderNotFound is returned when there is no decoder registered for a format.
+ ErrDecoderNotFound = encodingError("decoder not found for this format")
+
+	// ErrDecoderFormatAlreadyRegistered is returned when a decoder is already registered for a format.
+ ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format")
+)
+
+// DecoderRegistry can choose an appropriate Decoder based on the provided format.
+type DecoderRegistry struct {
+ decoders map[string]Decoder
+
+ mu sync.RWMutex
+}
+
+// NewDecoderRegistry returns a new, initialized DecoderRegistry.
+func NewDecoderRegistry() *DecoderRegistry {
+ return &DecoderRegistry{
+ decoders: make(map[string]Decoder),
+ }
+}
+
+// RegisterDecoder registers a Decoder for a format.
+// Registering a Decoder for an already existing format is not supported.
+func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ if _, ok := e.decoders[format]; ok {
+ return ErrDecoderFormatAlreadyRegistered
+ }
+
+ e.decoders[format] = enc
+
+ return nil
+}
+
+// Decode calls the underlying Decoder based on the format.
+func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]interface{}) error {
+ e.mu.RLock()
+ decoder, ok := e.decoders[format]
+ e.mu.RUnlock()
+
+ if !ok {
+ return ErrDecoderNotFound
+ }
+
+ return decoder.Decode(b, v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go
new file mode 100644
index 000000000..4485063b6
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go
@@ -0,0 +1,61 @@
+package dotenv
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/subosito/gotenv"
+)
+
+const keyDelimiter = "_"
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables
+// (commonly called as dotenv format).
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ flattened := map[string]interface{}{}
+
+ flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter)
+
+ keys := make([]string, 0, len(flattened))
+
+ for key := range flattened {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ var buf bytes.Buffer
+
+ for _, key := range keys {
+ _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key]))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ var buf bytes.Buffer
+
+ _, err := buf.Write(b)
+ if err != nil {
+ return err
+ }
+
+ env, err := gotenv.StrictParse(&buf)
+ if err != nil {
+ return err
+ }
+
+ for key, value := range env {
+ v[key] = value
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go
new file mode 100644
index 000000000..ce6e6efa3
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go
@@ -0,0 +1,41 @@
+package dotenv
+
+import (
+ "strings"
+
+ "github.com/spf13/cast"
+)
+
+// flattenAndMergeMap recursively flattens the given map into a new map
+// Code is based on the function with the same name in the main package.
+// TODO: move it to a common place
+func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} {
+ if shadow != nil && prefix != "" && shadow[prefix] != nil {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]interface{})
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += delimiter
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = val
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter)
+ }
+ return shadow
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/encoder.go
new file mode 100644
index 000000000..2341bf235
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/encoder.go
@@ -0,0 +1,60 @@
+package encoding
+
+import (
+ "sync"
+)
+
+// Encoder encodes the contents of v into a byte representation.
+// It's primarily used for encoding a map[string]interface{} into a file format.
+type Encoder interface {
+ Encode(v map[string]interface{}) ([]byte, error)
+}
+
+const (
+ // ErrEncoderNotFound is returned when there is no encoder registered for a format.
+ ErrEncoderNotFound = encodingError("encoder not found for this format")
+
+ // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format.
+ ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format")
+)
+
+// EncoderRegistry can choose an appropriate Encoder based on the provided format.
+type EncoderRegistry struct {
+ encoders map[string]Encoder
+
+ mu sync.RWMutex
+}
+
+// NewEncoderRegistry returns a new, initialized EncoderRegistry.
+func NewEncoderRegistry() *EncoderRegistry {
+ return &EncoderRegistry{
+ encoders: make(map[string]Encoder),
+ }
+}
+
+// RegisterEncoder registers an Encoder for a format.
+// Registering an Encoder for an already existing format is not supported.
+func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ if _, ok := e.encoders[format]; ok {
+ return ErrEncoderFormatAlreadyRegistered
+ }
+
+ e.encoders[format] = enc
+
+ return nil
+}
+
+func (e *EncoderRegistry) Encode(format string, v map[string]interface{}) ([]byte, error) {
+ e.mu.RLock()
+ encoder, ok := e.encoders[format]
+ e.mu.RUnlock()
+
+ if !ok {
+ return nil, ErrEncoderNotFound
+ }
+
+ return encoder.Encode(v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/error.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/error.go
new file mode 100644
index 000000000..e4cde02d7
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/error.go
@@ -0,0 +1,7 @@
+package encoding
+
+type encodingError string
+
+func (e encodingError) Error() string {
+ return string(e)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
new file mode 100644
index 000000000..7fde8e4bc
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
@@ -0,0 +1,40 @@
+package hcl
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/printer"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding.
+// TODO: add printer config to the codec?
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: use printer.Format? Is the trailing newline an issue?
+
+ ast, err := hcl.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+
+ err = printer.Fprint(&buf, ast.Node)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ return hcl.Unmarshal(b, &v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go
new file mode 100644
index 000000000..9acd87fc3
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go
@@ -0,0 +1,99 @@
+package ini
+
+import (
+ "bytes"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cast"
+ "gopkg.in/ini.v1"
+)
+
+// LoadOptions contains all customized options used for load data source(s).
+// This type is added here for convenience: this way consumers can import a single package called "ini".
+type LoadOptions = ini.LoadOptions
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding.
+type Codec struct {
+ KeyDelimiter string
+ LoadOptions LoadOptions
+}
+
+func (c Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ cfg := ini.Empty()
+ ini.PrettyFormat = false
+
+ flattened := map[string]interface{}{}
+
+ flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter())
+
+ keys := make([]string, 0, len(flattened))
+
+ for key := range flattened {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ sectionName, keyName := "", key
+
+ lastSep := strings.LastIndex(key, ".")
+ if lastSep != -1 {
+ sectionName = key[:(lastSep)]
+ keyName = key[(lastSep + 1):]
+ }
+
+ // TODO: is this a good idea?
+ if sectionName == "default" {
+ sectionName = ""
+ }
+
+ cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key]))
+ }
+
+ var buf bytes.Buffer
+
+ _, err := cfg.WriteTo(&buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (c Codec) Decode(b []byte, v map[string]interface{}) error {
+ cfg := ini.Empty(c.LoadOptions)
+
+ err := cfg.Append(b)
+ if err != nil {
+ return err
+ }
+
+ sections := cfg.Sections()
+
+ for i := 0; i < len(sections); i++ {
+ section := sections[i]
+ keys := section.Keys()
+
+ for j := 0; j < len(keys); j++ {
+ key := keys[j]
+ value := cfg.Section(section.Name()).Key(key.Name()).String()
+
+ deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter()))
+
+ // set innermost value
+ deepestMap[key.Name()] = value
+ }
+ }
+
+ return nil
+}
+
+func (c Codec) keyDelimiter() string {
+ if c.KeyDelimiter == "" {
+ return "."
+ }
+
+ return c.KeyDelimiter
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go
new file mode 100644
index 000000000..8329856b5
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go
@@ -0,0 +1,74 @@
+package ini
+
+import (
+ "strings"
+
+ "github.com/spf13/cast"
+)
+
+// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED
+// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
+
+// flattenAndMergeMap recursively flattens the given map into a new map
+// Code is based on the function with the same name in the main package.
+// TODO: move it to a common place
+func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} {
+ if shadow != nil && prefix != "" && shadow[prefix] != nil {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]interface{})
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += delimiter
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = val
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter)
+ }
+ return shadow
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go
new file mode 100644
index 000000000..b8a2251c1
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go
@@ -0,0 +1,86 @@
+package javaproperties
+
+import (
+ "bytes"
+ "sort"
+ "strings"
+
+ "github.com/magiconair/properties"
+ "github.com/spf13/cast"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding.
+type Codec struct {
+ KeyDelimiter string
+
+ // Store read properties on the object so that we can write back in order with comments.
+ // This will only be used if the configuration read is a properties file.
+ // TODO: drop this feature in v2
+ // TODO: make use of the global properties object optional
+ Properties *properties.Properties
+}
+
+func (c *Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ if c.Properties == nil {
+ c.Properties = properties.NewProperties()
+ }
+
+ flattened := map[string]interface{}{}
+
+ flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter())
+
+ keys := make([]string, 0, len(flattened))
+
+ for key := range flattened {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ _, _, err := c.Properties.Set(key, cast.ToString(flattened[key]))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var buf bytes.Buffer
+
+ _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (c *Codec) Decode(b []byte, v map[string]interface{}) error {
+ var err error
+ c.Properties, err = properties.Load(b, properties.UTF8)
+ if err != nil {
+ return err
+ }
+
+ for _, key := range c.Properties.Keys() {
+ // ignore existence check: we know it's there
+ value, _ := c.Properties.Get(key)
+
+ // recursively build nested maps
+ path := strings.Split(key, c.keyDelimiter())
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+
+ return nil
+}
+
+func (c Codec) keyDelimiter() string {
+ if c.KeyDelimiter == "" {
+ return "."
+ }
+
+ return c.KeyDelimiter
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go
new file mode 100644
index 000000000..93755cac1
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go
@@ -0,0 +1,74 @@
+package javaproperties
+
+import (
+ "strings"
+
+ "github.com/spf13/cast"
+)
+
+// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED
+// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
+
+// flattenAndMergeMap recursively flattens the given map into a new map
+// Code is based on the function with the same name in the main package.
+// TODO: move it to a common place
+func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} {
+ if shadow != nil && prefix != "" && shadow[prefix] != nil {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]interface{})
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += delimiter
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = val
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter)
+ }
+ return shadow
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/json/codec.go
new file mode 100644
index 000000000..1b7caaceb
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/json/codec.go
@@ -0,0 +1,17 @@
+package json
+
+import (
+ "encoding/json"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding.
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ // TODO: expose prefix and indent in the Codec as setting?
+ return json.MarshalIndent(v, "", " ")
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ return json.Unmarshal(b, &v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
new file mode 100644
index 000000000..45fddc8b5
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
@@ -0,0 +1,39 @@
+//go:build viper_toml1
+// +build viper_toml1
+
+package toml
+
+import (
+ "github.com/pelletier/go-toml"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding.
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ t, err := toml.TreeFromMap(v)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := t.ToTomlString()
+ if err != nil {
+ return nil, err
+ }
+
+ return []byte(s), nil
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ tree, err := toml.LoadBytes(b)
+ if err != nil {
+ return err
+ }
+
+ tmap := tree.ToMap()
+ for key, value := range tmap {
+ v[key] = value
+ }
+
+ return nil
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
new file mode 100644
index 000000000..112c6d372
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
@@ -0,0 +1,19 @@
+//go:build !viper_toml1
+// +build !viper_toml1
+
+package toml
+
+import (
+ "github.com/pelletier/go-toml/v2"
+)
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding.
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ return toml.Marshal(v)
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ return toml.Unmarshal(b, &v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
new file mode 100644
index 000000000..24cc19dfc
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
@@ -0,0 +1,14 @@
+package yaml
+
+// import "gopkg.in/yaml.v2"
+
+// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding.
+type Codec struct{}
+
+func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
+ return yaml.Marshal(v)
+}
+
+func (Codec) Decode(b []byte, v map[string]interface{}) error {
+ return yaml.Unmarshal(b, &v)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
new file mode 100644
index 000000000..4c398c2f4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
@@ -0,0 +1,14 @@
+//go:build viper_yaml2
+// +build viper_yaml2
+
+package yaml
+
+import yamlv2 "gopkg.in/yaml.v2"
+
+var yaml = struct {
+ Marshal func(in interface{}) (out []byte, err error)
+ Unmarshal func(in []byte, out interface{}) (err error)
+}{
+ Marshal: yamlv2.Marshal,
+ Unmarshal: yamlv2.Unmarshal,
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
new file mode 100644
index 000000000..3a4775ced
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
@@ -0,0 +1,14 @@
+//go:build !viper_yaml2
+// +build !viper_yaml2
+
+package yaml
+
+import yamlv3 "gopkg.in/yaml.v3"
+
+var yaml = struct {
+ Marshal func(in interface{}) (out []byte, err error)
+ Unmarshal func(in []byte, out interface{}) (err error)
+}{
+ Marshal: yamlv3.Marshal,
+ Unmarshal: yamlv3.Unmarshal,
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/logger.go b/test/integration/vendor/github.com/spf13/viper/logger.go
new file mode 100644
index 000000000..a64e1446c
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/logger.go
@@ -0,0 +1,77 @@
+package viper
+
+import (
+ "fmt"
+
+ jww "github.com/spf13/jwalterweatherman"
+)
+
+// Logger is a unified interface for various logging use cases and practices, including:
+// - leveled logging
+// - structured logging
+type Logger interface {
+ // Trace logs a Trace event.
+ //
+ // Even more fine-grained information than Debug events.
+ // Loggers not supporting this level should fall back to Debug.
+ Trace(msg string, keyvals ...interface{})
+
+ // Debug logs a Debug event.
+ //
+ // A verbose series of information events.
+ // They are useful when debugging the system.
+ Debug(msg string, keyvals ...interface{})
+
+ // Info logs an Info event.
+ //
+ // General information about what's happening inside the system.
+ Info(msg string, keyvals ...interface{})
+
+ // Warn logs a Warn(ing) event.
+ //
+ // Non-critical events that should be looked at.
+ Warn(msg string, keyvals ...interface{})
+
+ // Error logs an Error event.
+ //
+ // Critical events that require immediate attention.
+ // Loggers commonly provide Fatal and Panic levels above Error level,
+	// but exiting and panicking is out of scope for a logging library.
+ Error(msg string, keyvals ...interface{})
+}
+
+type jwwLogger struct{}
+
+func (jwwLogger) Trace(msg string, keyvals ...interface{}) {
+ jww.TRACE.Printf(jwwLogMessage(msg, keyvals...))
+}
+
+func (jwwLogger) Debug(msg string, keyvals ...interface{}) {
+ jww.DEBUG.Printf(jwwLogMessage(msg, keyvals...))
+}
+
+func (jwwLogger) Info(msg string, keyvals ...interface{}) {
+ jww.INFO.Printf(jwwLogMessage(msg, keyvals...))
+}
+
+func (jwwLogger) Warn(msg string, keyvals ...interface{}) {
+ jww.WARN.Printf(jwwLogMessage(msg, keyvals...))
+}
+
+func (jwwLogger) Error(msg string, keyvals ...interface{}) {
+ jww.ERROR.Printf(jwwLogMessage(msg, keyvals...))
+}
+
+func jwwLogMessage(msg string, keyvals ...interface{}) string {
+ out := msg
+
+ if len(keyvals) > 0 && len(keyvals)%2 == 1 {
+ keyvals = append(keyvals, nil)
+ }
+
+ for i := 0; i <= len(keyvals)-2; i += 2 {
+ out = fmt.Sprintf("%s %v=%v", out, keyvals[i], keyvals[i+1])
+ }
+
+ return out
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/util.go b/test/integration/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 000000000..64e657505
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,217 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "github.com/spf13/cast"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+ err error // underlying error reported by the format parser
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+ return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, create a copy and lower-case the keys recursively.
+// Non-map values are returned unchanged.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+ switch v := value.(type) {
+ case map[interface{}]interface{}:
+ // Normalize interface-keyed maps to string keys before copying.
+ value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ value = copyAndInsensitiviseMap(v)
+ }
+
+ return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+//
+// Note: only nested maps are copied and lower-cased; other values (including
+// slices) are stored as-is, so they remain shared with the input map.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+ nm := make(map[string]interface{})
+
+ for key, val := range m {
+ lkey := strings.ToLower(key)
+ switch v := val.(type) {
+ case map[interface{}]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(v)
+ default:
+ nm[lkey] = v
+ }
+ }
+
+ return nm
+}
+
+// insensitiviseVal lower-cases the keys of a map value in place, recursing
+// into nested maps and arrays. When val is an interface-keyed map it is
+// replaced by a string-keyed copy, so the (possibly new) value is returned.
+func insensitiviseVal(val interface{}) interface{} {
+ switch val.(type) {
+ case map[interface{}]interface{}:
+ // nested map: cast and recursively insensitivise
+ val = cast.ToStringMap(val)
+ insensitiviseMap(val.(map[string]interface{}))
+ case map[string]interface{}:
+ // nested map: recursively insensitivise
+ insensitiviseMap(val.(map[string]interface{}))
+ case []interface{}:
+ // nested array: recursively insensitivise
+ insensitiveArray(val.([]interface{}))
+ }
+ return val
+}
+
+// insensitiviseMap lower-cases every key of m in place, recursing into
+// nested maps and arrays via insensitiviseVal.
+func insensitiviseMap(m map[string]interface{}) {
+ for key, val := range m {
+ val = insensitiviseVal(val)
+ lower := strings.ToLower(key)
+ if key != lower {
+ // remove old key (not lower-cased)
+ delete(m, key)
+ }
+ // update map
+ m[lower] = val
+ }
+}
+
+// insensitiveArray applies insensitiviseVal to every element of a in place,
+// so maps nested inside arrays also get lower-cased keys.
+func insensitiveArray(a []interface{}) {
+ for i, val := range a {
+ a[i] = insensitiviseVal(val)
+ }
+}
+
+// absPathify expands a leading "$HOME" and any environment variables in
+// inPath and returns the cleaned absolute path, or "" if resolution fails.
+func absPathify(logger Logger, inPath string) string {
+ logger.Info("trying to resolve absolute path", "path", inPath)
+
+ // Expand "$HOME" only when it is the whole path or a path prefix,
+ // so e.g. "$HOMEDIR" is left for os.ExpandEnv below.
+ if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) {
+ inPath = userHomeDir() + inPath[5:]
+ }
+
+ inPath = os.ExpandEnv(inPath)
+
+ if filepath.IsAbs(inPath) {
+ return filepath.Clean(inPath)
+ }
+
+ p, err := filepath.Abs(inPath)
+ if err == nil {
+ return filepath.Clean(p)
+ }
+
+ logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error())
+
+ return ""
+}
+
+// stringInSlice reports whether a is present in list.
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// userHomeDir returns the current user's home directory:
+// HOMEDRIVE+HOMEPATH (falling back to USERPROFILE) on Windows,
+// the HOME environment variable everywhere else.
+func userHomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ if home == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ return home
+ }
+ return os.Getenv("HOME")
+}
+
+// safeMul returns a*b, or 0 when the unsigned multiplication overflows
+// (detected by the division round-trip check below).
+func safeMul(a, b uint) uint {
+ c := a * b
+ if a > 1 && b > 1 && c/b != a {
+ return 0
+ }
+ return c
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) uint {
+ sizeStr = strings.TrimSpace(sizeStr)
+ lastChar := len(sizeStr) - 1
+ multiplier := uint(1)
+
+ if lastChar > 0 {
+ // Accept an optional trailing "b"/"B", possibly preceded by a
+ // k/m/g scale character (case-insensitive).
+ if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+ if lastChar > 1 {
+ switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+ case 'k':
+ multiplier = 1 << 10
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'm':
+ multiplier = 1 << 20
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'g':
+ multiplier = 1 << 30
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ default:
+ // Plain "...b" suffix with no scale: strip just the "b".
+ multiplier = 1
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+ }
+ }
+ }
+ }
+
+ // Negative (or unparsable) numbers are clamped to 0 bytes.
+ size := cast.ToInt(sizeStr)
+ if size < 0 {
+ size = 0
+ }
+
+ return safeMul(uint(size), multiplier)
+}
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ // Walk/extend the map one path element at a time.
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map (the old value is discarded)
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/viper.go b/test/integration/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 000000000..5c12529b4
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,2149 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/mitchellh/mapstructure"
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ "github.com/spf13/pflag"
+
+ "github.com/spf13/viper/internal/encoding"
+ "github.com/spf13/viper/internal/encoding/dotenv"
+ "github.com/spf13/viper/internal/encoding/hcl"
+ "github.com/spf13/viper/internal/encoding/ini"
+ "github.com/spf13/viper/internal/encoding/javaproperties"
+ "github.com/spf13/viper/internal/encoding/json"
+ "github.com/spf13/viper/internal/encoding/toml"
+ "github.com/spf13/viper/internal/encoding/yaml"
+)
+
+// ConfigMarshalError happens when failing to marshal the configuration.
+type ConfigMarshalError struct {
+ err error // underlying error reported by the encoder
+}
+
+// Error returns the formatted configuration error.
+func (e ConfigMarshalError) Error() string {
+ return fmt.Sprintf("While marshaling config: %s", e.err.Error())
+}
+
+// v is the package-global Viper instance backing the package-level
+// convenience functions (Get, OnConfigChange, WatchConfig, ...).
+var v *Viper
+
+// RemoteResponse carries one value (or error) delivered by a watched
+// remote key/value store.
+type RemoteResponse struct {
+ Value []byte
+ Error error
+}
+
+func init() {
+ v = New()
+}
+
+// remoteConfigFactory abstracts the remote-provider implementation that is
+// optionally installed into RemoteConfig (see the remote package).
+type remoteConfigFactory interface {
+ Get(rp RemoteProvider) (io.Reader, error)
+ Watch(rp RemoteProvider) (io.Reader, error)
+ WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional, see the remote package
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+ return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd and Consul are supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+ return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error
+func (rce RemoteConfigError) Error() string {
+ return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+ name, locations string // config name searched for and the paths searched
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+ return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// ConfigFileAlreadyExistsError denotes failure to write new configuration file.
+type ConfigFileAlreadyExistsError string
+
+// Error returns the formatted error when configuration already exists.
+func (faee ConfigFileAlreadyExistsError) Error() string {
+ return fmt.Sprintf("Config File %q Already Exists", string(faee))
+}
+
+// A DecoderConfigOption can be passed to viper.Unmarshal to configure
+// mapstructure.DecoderConfig options
+type DecoderConfigOption func(*mapstructure.DecoderConfig)
+
+// DecodeHook returns a DecoderConfigOption which overrides the default
+// DecoderConfig.DecodeHook value, the default is:
+//
+// mapstructure.ComposeDecodeHookFunc(
+// mapstructure.StringToTimeDurationHookFunc(),
+// mapstructure.StringToSliceHookFunc(","),
+// )
+func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
+ return func(c *mapstructure.DecoderConfig) {
+ c.DecodeHook = hook // replaces (does not compose with) the default hook
+ }
+}
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+// Defaults : {
+// "secret": "",
+// "user": "default",
+// "endpoint": "https://localhost"
+// }
+// Config : {
+// "user": "root"
+// "secret": "defaultsecret"
+// }
+// Env : {
+// "secret": "somesecretkey"
+// }
+//
+// The resulting config will have the following values:
+//
+// {
+// "secret": "somesecretkey",
+// "user": "root",
+// "endpoint": "https://localhost"
+// }
+//
+// Note: Vipers are not safe for concurrent Get() and Set() operations.
+type Viper struct {
+ // Delimiter that separates a list of keys
+ // used to access a nested value in one go
+ keyDelim string
+
+ // A set of paths to look for the config file in
+ configPaths []string
+
+ // The filesystem to read config from.
+ fs afero.Fs
+
+ // A set of remote providers to search for the configuration
+ remoteProviders []*defaultRemoteProvider
+
+ // Name of file to look for inside the path
+ configName string
+ configFile string // explicit config file path set via SetConfigFile
+ configType string // config format override (e.g. "yaml")
+ configPermissions os.FileMode // file mode used when writing config files
+ envPrefix string // prefix prepended to keys for env lookups, see mergeWithEnvPrefix
+
+ // Specific commands for ini parsing
+ iniLoadOptions ini.LoadOptions
+
+ automaticEnvApplied bool // whether AutomaticEnv has been enabled
+ envKeyReplacer StringReplacer // rewrites keys before env lookup, see getEnv
+ allowEmptyEnv bool // treat set-but-empty env vars as valid, see getEnv
+
+ // Per-source storage, one map per level of the precedence chain above.
+ config map[string]interface{}
+ override map[string]interface{}
+ defaults map[string]interface{}
+ kvstore map[string]interface{}
+ pflags map[string]FlagValue
+ env map[string][]string
+ aliases map[string]string
+ typeByDefValue bool
+
+ onConfigChange func(fsnotify.Event) // callback registered via OnConfigChange
+
+ logger Logger
+
+ // TODO: should probably be protected with a mutex
+ encoderRegistry *encoding.EncoderRegistry
+ decoderRegistry *encoding.DecoderRegistry
+}
+
+// New returns an initialized Viper instance with default settings:
+// "." key delimiter, config name "config", 0644 config file permissions,
+// the OS filesystem, and the jww-backed logger.
+func New() *Viper {
+ v := new(Viper)
+ v.keyDelim = "."
+ v.configName = "config"
+ v.configPermissions = os.FileMode(0o644)
+ v.fs = afero.NewOsFs()
+ v.config = make(map[string]interface{})
+ v.override = make(map[string]interface{})
+ v.defaults = make(map[string]interface{})
+ v.kvstore = make(map[string]interface{})
+ v.pflags = make(map[string]FlagValue)
+ v.env = make(map[string][]string)
+ v.aliases = make(map[string]string)
+ v.typeByDefValue = false
+ v.logger = jwwLogger{}
+
+ // Register the built-in encoder/decoder codecs.
+ v.resetEncoding()
+
+ return v
+}
+
+// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
+// If you're unfamiliar with this style,
+// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
+type Option interface {
+ apply(v *Viper)
+}
+
+// optionFunc adapts a plain function to the Option interface.
+type optionFunc func(v *Viper)
+
+func (fn optionFunc) apply(v *Viper) {
+ fn(v)
+}
+
+// KeyDelimiter sets the delimiter used for determining key parts.
+// By default its value is ".".
+func KeyDelimiter(d string) Option {
+ return optionFunc(func(v *Viper) {
+ v.keyDelim = d
+ })
+}
+
+// StringReplacer applies a set of replacements to a string.
+// *strings.Replacer satisfies this interface.
+type StringReplacer interface {
+ // Replace returns a copy of s with all replacements performed.
+ Replace(s string) string
+}
+
+// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
+func EnvKeyReplacer(r StringReplacer) Option {
+ return optionFunc(func(v *Viper) {
+ v.envKeyReplacer = r
+ })
+}
+
+// NewWithOptions creates a new Viper instance.
+func NewWithOptions(opts ...Option) *Viper {
+ v := New()
+
+ for _, opt := range opts {
+ opt.apply(v)
+ }
+
+ // Re-register codecs so options that affect encoding (e.g. KeyDelimiter,
+ // which the ini and properties codecs capture) take effect.
+ v.resetEncoding()
+
+ return v
+}
+
+// Reset is intended for testing, will reset all to default settings.
+// In the public interface for the viper package so applications
+// can use it in their testing as well.
+// It also restores the SupportedExts and SupportedRemoteProviders lists.
+func Reset() {
+ v = New()
+ SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"}
+ SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"}
+}
+
+// resetEncoding rebuilds the encoder/decoder registries, registering a codec
+// for every supported format. The ini and properties codecs capture the
+// current key delimiter (and ini load options), which is why this is re-run
+// after options are applied (see NewWithOptions).
+//
+// TODO: make this lazy initialization instead
+func (v *Viper) resetEncoding() {
+ encoderRegistry := encoding.NewEncoderRegistry()
+ decoderRegistry := encoding.NewDecoderRegistry()
+
+ {
+ codec := yaml.Codec{}
+
+ encoderRegistry.RegisterEncoder("yaml", codec)
+ decoderRegistry.RegisterDecoder("yaml", codec)
+
+ encoderRegistry.RegisterEncoder("yml", codec)
+ decoderRegistry.RegisterDecoder("yml", codec)
+ }
+
+ {
+ codec := json.Codec{}
+
+ encoderRegistry.RegisterEncoder("json", codec)
+ decoderRegistry.RegisterDecoder("json", codec)
+ }
+
+ {
+ codec := toml.Codec{}
+
+ encoderRegistry.RegisterEncoder("toml", codec)
+ decoderRegistry.RegisterDecoder("toml", codec)
+ }
+
+ {
+ // The hcl codec also handles Terraform variable files.
+ codec := hcl.Codec{}
+
+ encoderRegistry.RegisterEncoder("hcl", codec)
+ decoderRegistry.RegisterDecoder("hcl", codec)
+
+ encoderRegistry.RegisterEncoder("tfvars", codec)
+ decoderRegistry.RegisterDecoder("tfvars", codec)
+ }
+
+ {
+ codec := ini.Codec{
+ KeyDelimiter: v.keyDelim,
+ LoadOptions: v.iniLoadOptions,
+ }
+
+ encoderRegistry.RegisterEncoder("ini", codec)
+ decoderRegistry.RegisterDecoder("ini", codec)
+ }
+
+ {
+ codec := &javaproperties.Codec{
+ KeyDelimiter: v.keyDelim,
+ }
+
+ encoderRegistry.RegisterEncoder("properties", codec)
+ decoderRegistry.RegisterDecoder("properties", codec)
+
+ encoderRegistry.RegisterEncoder("props", codec)
+ decoderRegistry.RegisterDecoder("props", codec)
+
+ encoderRegistry.RegisterEncoder("prop", codec)
+ decoderRegistry.RegisterDecoder("prop", codec)
+ }
+
+ {
+ codec := &dotenv.Codec{}
+
+ encoderRegistry.RegisterEncoder("dotenv", codec)
+ decoderRegistry.RegisterDecoder("dotenv", codec)
+
+ encoderRegistry.RegisterEncoder("env", codec)
+ decoderRegistry.RegisterDecoder("env", codec)
+ }
+
+ v.encoderRegistry = encoderRegistry
+ v.decoderRegistry = decoderRegistry
+}
+
+// defaultRemoteProvider is the built-in RemoteProvider implementation used
+// by AddRemoteProvider and AddSecureRemoteProvider.
+type defaultRemoteProvider struct {
+ provider string
+ endpoint string
+ path string
+ secretKeyring string
+}
+
+// Provider returns the remote provider name (e.g. "etcd", "consul").
+func (rp defaultRemoteProvider) Provider() string {
+ return rp.provider
+}
+
+// Endpoint returns the provider endpoint URL/address.
+func (rp defaultRemoteProvider) Endpoint() string {
+ return rp.endpoint
+}
+
+// Path returns the path in the k/v store to read configuration from.
+func (rp defaultRemoteProvider) Path() string {
+ return rp.path
+}
+
+// SecretKeyring returns the filepath of the openpgp secret keyring, if any.
+func (rp defaultRemoteProvider) SecretKeyring() string {
+ return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// Optional secretKeyring to unencrypt encrypted values
+// can be provided.
+// Implemented by defaultRemoteProvider.
+type RemoteProvider interface {
+ Provider() string
+ Endpoint() string
+ Path() string
+ SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+// Restored to this initial value by Reset.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+// Restored to this initial value by Reset.
+var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"}
+
+// OnConfigChange registers run to be invoked (from the WatchConfig
+// goroutine) whenever the watched config file changes.
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+ v.onConfigChange = run
+}
+
+// WatchConfig starts watching the config file's directory in a background
+// goroutine, re-reading the config and invoking the OnConfigChange callback
+// whenever the file is written/created or its symlink-resolved path changes
+// (e.g. a k8s ConfigMap replacement). Watching stops once the file is removed.
+//
+// NOTE(review): a failure to create the fsnotify watcher calls log.Fatal,
+// terminating the process.
+func WatchConfig() { v.WatchConfig() }
+
+func (v *Viper) WatchConfig() {
+ // initWG makes WatchConfig block until the watch below is installed.
+ initWG := sync.WaitGroup{}
+ initWG.Add(1)
+ go func() {
+ watcher, err := newWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+ // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+ filename, err := v.getConfigFile()
+ if err != nil {
+ log.Printf("error: %v\n", err)
+ initWG.Done()
+ return
+ }
+
+ configFile := filepath.Clean(filename)
+ configDir, _ := filepath.Split(configFile)
+ realConfigFile, _ := filepath.EvalSymlinks(filename)
+
+ // eventsWG keeps the watcher alive until the event loop exits.
+ eventsWG := sync.WaitGroup{}
+ eventsWG.Add(1)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok { // 'Events' channel is closed
+ eventsWG.Done()
+ return
+ }
+ currentConfigFile, _ := filepath.EvalSymlinks(filename)
+ // we only care about the config file with the following cases:
+ // 1 - if the config file was modified or created
+ // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
+ if (filepath.Clean(event.Name) == configFile &&
+ (event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) ||
+ (currentConfigFile != "" && currentConfigFile != realConfigFile) {
+ realConfigFile = currentConfigFile
+ err := v.ReadInConfig()
+ if err != nil {
+ log.Printf("error reading config file: %v\n", err)
+ }
+ if v.onConfigChange != nil {
+ v.onConfigChange(event)
+ }
+ } else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) {
+ eventsWG.Done()
+ return
+ }
+
+ case err, ok := <-watcher.Errors:
+ if ok { // 'Errors' channel is not closed
+ log.Printf("watcher error: %v\n", err)
+ }
+ eventsWG.Done()
+ return
+ }
+ }
+ }()
+ watcher.Add(configDir)
+ initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
+ eventsWG.Wait() // now, wait for event loop to end in this go-routine...
+ }()
+ initWG.Wait() // make sure that the go routine above fully ended before returning
+}
+
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
+// An empty string is ignored.
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+
+func (v *Viper) SetConfigFile(in string) {
+ if in != "" {
+ v.configFile = in
+ }
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
+// An empty string is ignored.
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+
+func (v *Viper) SetEnvPrefix(in string) {
+ if in != "" {
+ v.envPrefix = in
+ }
+}
+
+// mergeWithEnvPrefix upper-cases in, prepending the configured env prefix
+// when one is set (e.g. prefix "spf" + key "id" -> "SPF_ID").
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+ if v.envPrefix != "" {
+ return strings.ToUpper(v.envPrefix + "_" + in)
+ }
+
+ return strings.ToUpper(in)
+}
+
+// AllowEmptyEnv tells Viper to consider set,
+// but empty environment variables as valid values instead of falling back.
+// For backward compatibility reasons this is false by default.
+// See getEnv for how the flag is applied.
+func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
+
+func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
+ v.allowEmptyEnv = allowEmptyEnv
+}
+
+// TODO: should getEnv logic be moved into find(). Can generalize the use of
+// rewriting keys many things, Ex: Get('someKey') -> some_key
+// (camel case to snake case for JSON keys perhaps)
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+// The second result reports whether the variable counts as set: an unset
+// variable — or an empty one while allowEmptyEnv is false — counts as unset.
+func (v *Viper) getEnv(key string) (string, bool) {
+ if v.envKeyReplacer != nil {
+ key = v.envKeyReplacer.Replace(key)
+ }
+
+ val, ok := os.LookupEnv(key)
+
+ return val, ok && (v.allowEmptyEnv || val != "")
+}
+
+// ConfigFileUsed returns the file used to populate the config registry
+// (as set via SetConfigFile, or discovered during the config search —
+// discovery code is elsewhere in this file).
+func ConfigFileUsed() string { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+// Paths are made absolute before being stored; duplicates and empty
+// strings are ignored.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+
+func (v *Viper) AddConfigPath(in string) {
+ if in != "" {
+ absin := absPathify(v.logger, in)
+
+ v.logger.Info("adding path to search paths", "path", absin)
+ if !stringInSlice(absin, v.configPaths) {
+ v.configPaths = append(v.configPaths, absin)
+ }
+ }
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value: "etcd", "etcd3", "consul" or "firestore" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+func AddRemoteProvider(provider, endpoint, path string) error {
+ return v.AddRemoteProvider(provider, endpoint, path)
+}
+
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ // NOTE(review): an empty provider or endpoint is silently ignored
+ // (nil error, nothing added) — matches upstream behavior.
+ if provider != "" && endpoint != "" {
+ v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint)
+
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ }
+ // Skip provider configurations that were already added.
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value: "etcd", "etcd3", "consul" or "firestore" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+// Secure Remote Providers are implemented with github.com/bketelsen/crypt
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ // NOTE(review): like AddRemoteProvider, empty provider/endpoint is a
+ // silent no-op returning nil.
+ if provider != "" && endpoint != "" {
+ v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint)
+
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ secretKeyring: secretkeyring,
+ }
+ // Skip provider configurations that were already added.
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+// providerPathExists reports whether an identical provider configuration
+// (same provider, endpoint, path and keyring) has already been added.
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+ for _, y := range v.remoteProviders {
+ if reflect.DeepEqual(y, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ next, ok := source[path[0]]
+ if ok {
+ // Fast path
+ if len(path) == 1 {
+ return next
+ }
+
+ // Nested case
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ // Normalize interface-keyed maps before recursing.
+ return v.searchMap(cast.ToStringMap(next), path[1:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ return v.searchMap(next.(map[string]interface{}), path[1:])
+ default:
+ // got a value but nested key expected, return "nil" for not found
+ return nil
+ }
+ }
+ return nil
+}
+
+// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice.
+//
+// While searchMap() considers each path element as a single map key or slice index, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ // search for path prefixes, starting from the longest one
+ // (so a literal "foo.bar" key wins over nested foo -> bar)
+ for i := len(path); i > 0; i-- {
+ prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+ var val interface{}
+ switch sourceIndexable := source.(type) {
+ case []interface{}:
+ val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path)
+ case map[string]interface{}:
+ val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path)
+ }
+ if val != nil {
+ return val
+ }
+ }
+
+ // not found
+ return nil
+}
+
<AUTOGENERATED_MIDDLE_OF_RESPONSE_MARKER>Due to the large size of this edit set, the remaining blocks continue below without interruption.</AUTOGENERATED_MIDDLE_OF_RESPONSE_MARKER>
+// searchSliceWithPathPrefixes searches for a value for path in sourceSlice
+//
+// This function is part of the searchIndexableWithPathPrefixes recurring search and
+// should not be called directly from functions other than searchIndexableWithPathPrefixes.
+func (v *Viper) searchSliceWithPathPrefixes(
+ sourceSlice []interface{},
+ prefixKey string,
+ pathIndex int,
+ path []string,
+) interface{} {
+ // if the prefixKey is not a number or it is out of bounds of the slice
+ index, err := strconv.Atoi(prefixKey)
+ if err != nil || len(sourceSlice) <= index {
+ return nil
+ }
+
+ next := sourceSlice[index]
+
+ // Fast path
+ if pathIndex == len(path) {
+ return next
+ }
+
+ switch n := next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
+ case map[string]interface{}, []interface{}:
+ return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+
+ // not found
+ return nil
+}
+
+// searchMapWithPathPrefixes searches for a value for path in sourceMap
+//
+// This function is part of the searchIndexableWithPathPrefixes recurring search and
+// should not be called directly from functions other than searchIndexableWithPathPrefixes.
+func (v *Viper) searchMapWithPathPrefixes(
+ sourceMap map[string]interface{},
+ prefixKey string,
+ pathIndex int,
+ path []string,
+) interface{} {
+ next, ok := sourceMap[prefixKey]
+ if !ok {
+ return nil
+ }
+
+ // Fast path: the prefix consumed the whole path.
+ if pathIndex == len(path) {
+ return next
+ }
+
+ // Nested case
+ switch n := next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
+ case map[string]interface{}, []interface{}:
+ return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+
+ // not found
+ return nil
+}
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//
+// "foo.bar.baz" in a lower-priority map
+//
+// Returns the joined shadowing prefix, or "" when nothing shadows path.
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+ var parentVal interface{}
+ for i := 1; i < len(path); i++ {
+ parentVal = v.searchMap(m, path[0:i])
+ if parentVal == nil {
+ // not found, no need to add more path elements
+ return ""
+ }
+ switch parentVal.(type) {
+ case map[interface{}]interface{}:
+ continue
+ case map[string]interface{}:
+ continue
+ default:
+ // parentVal is a regular value which shadows "path"
+ return strings.Join(path[0:i], v.keyDelim)
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//
+// "foo.bar.baz" in a lower-priority map
+//
+// Returns the shadowing key, or "" when nothing shadows path.
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+ // unify input map
+ var m map[string]interface{}
+ switch mi.(type) {
+ case map[string]string, map[string]FlagValue:
+ m = cast.ToStringMap(mi)
+ default:
+ // other map shapes are not flat key registries; nothing to check
+ return ""
+ }
+
+ // scan paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := m[parentKey]; ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+//
+// "foo.bar.baz" in a lower-priority map
+//
+// Returns the shadowing key, or "" when nothing shadows path.
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+// []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+// "a b c"
+//
+// See the typeByDefValue branch in Get for the cast logic.
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+ v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance that backs the package-level
+// functions.
+func GetViper() *Viper {
+ return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
+func Get(key string) interface{} { return v.Get(key) }
+
+func (v *Viper) Get(key string) interface{} {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, true)
+ if val == nil {
+ return nil
+ }
+
+ if v.typeByDefValue {
+ // Coerce the result to the type of the key's default value
+ // (see SetTypeByDefaultValue).
+ // TODO(bep) this branch isn't covered by a single test.
+ valType := val
+ path := strings.Split(lcaseKey, v.keyDelim)
+ defVal := v.searchMap(v.defaults, path)
+ if defVal != nil {
+ valType = defVal
+ }
+
+ switch valType.(type) {
+ case bool:
+ return cast.ToBool(val)
+ case string:
+ return cast.ToString(val)
+ case int32, int16, int8, int:
+ return cast.ToInt(val)
+ case uint:
+ return cast.ToUint(val)
+ case uint32:
+ return cast.ToUint32(val)
+ case uint64:
+ return cast.ToUint64(val)
+ case int64:
+ return cast.ToInt64(val)
+ case float64, float32:
+ return cast.ToFloat64(val)
+ case time.Time:
+ return cast.ToTime(val)
+ case time.Duration:
+ return cast.ToDuration(val)
+ case []string:
+ return cast.ToStringSlice(val)
+ case []int:
+ return cast.ToIntSlice(val)
+ }
+ }
+
+ return val
+}
+
+// Sub returns new Viper instance representing a sub tree of this instance.
+// Sub is case-insensitive for a key.
+// It returns nil when the key is unset or its value is not a map.
+func Sub(key string) *Viper { return v.Sub(key) }
+
+func (v *Viper) Sub(key string) *Viper {
+ subv := New()
+ data := v.Get(key)
+ if data == nil {
+  return nil
+ }
+
+ if reflect.TypeOf(data).Kind() == reflect.Map {
+  // only the config layer is populated in the sub-instance
+  subv.config = cast.ToStringMap(data)
+  return subv
+ }
+ return nil
+}
+
+// The typed getters below all delegate to Get and convert the result with
+// the spf13/cast helpers; presumably cast returns the type's zero value when
+// the conversion fails — TODO(review) confirm against the cast package docs.
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+
+func (v *Viper) GetString(key string) string {
+ return cast.ToString(v.Get(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+
+func (v *Viper) GetBool(key string) bool {
+ return cast.ToBool(v.Get(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+
+func (v *Viper) GetInt(key string) int {
+ return cast.ToInt(v.Get(key))
+}
+
+// GetInt32 returns the value associated with the key as an integer.
+func GetInt32(key string) int32 { return v.GetInt32(key) }
+
+func (v *Viper) GetInt32(key string) int32 {
+ return cast.ToInt32(v.Get(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+
+func (v *Viper) GetInt64(key string) int64 {
+ return cast.ToInt64(v.Get(key))
+}
+
+// GetUint returns the value associated with the key as an unsigned integer.
+func GetUint(key string) uint { return v.GetUint(key) }
+
+func (v *Viper) GetUint(key string) uint {
+ return cast.ToUint(v.Get(key))
+}
+
+// GetUint16 returns the value associated with the key as an unsigned integer.
+func GetUint16(key string) uint16 { return v.GetUint16(key) }
+
+func (v *Viper) GetUint16(key string) uint16 {
+ return cast.ToUint16(v.Get(key))
+}
+
+// GetUint32 returns the value associated with the key as an unsigned integer.
+func GetUint32(key string) uint32 { return v.GetUint32(key) }
+
+func (v *Viper) GetUint32(key string) uint32 {
+ return cast.ToUint32(v.Get(key))
+}
+
+// GetUint64 returns the value associated with the key as an unsigned integer.
+func GetUint64(key string) uint64 { return v.GetUint64(key) }
+
+func (v *Viper) GetUint64(key string) uint64 {
+ return cast.ToUint64(v.Get(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+
+func (v *Viper) GetFloat64(key string) float64 {
+ return cast.ToFloat64(v.Get(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+
+func (v *Viper) GetTime(key string) time.Time {
+ return cast.ToTime(v.Get(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+
+func (v *Viper) GetDuration(key string) time.Duration {
+ return cast.ToDuration(v.Get(key))
+}
+
+// GetIntSlice returns the value associated with the key as a slice of int values.
+func GetIntSlice(key string) []int { return v.GetIntSlice(key) }
+
+func (v *Viper) GetIntSlice(key string) []int {
+ return cast.ToIntSlice(v.Get(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+
+func (v *Viper) GetStringSlice(key string) []string {
+ return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+ return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+
+func (v *Viper) GetStringMapString(key string) map[string]string {
+ return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+ return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+
+func (v *Viper) GetSizeInBytes(key string) uint {
+ sizeStr := cast.ToString(v.Get(key))
+ return parseSizeInBytes(sizeStr)
+}
+
+// UnmarshalKey takes a single key and unmarshals it into a Struct.
+func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.UnmarshalKey(key, rawVal, opts...)
+}
+
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
+}
+
+// Unmarshal unmarshals the config into a Struct. Make sure that the tags
+// on the fields of the structure are properly set.
+func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.Unmarshal(rawVal, opts...)
+}
+
+func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
+}
+
+// defaultDecoderConfig returns the default mapstructure.DecoderConfig with support
+// for time.Duration values & string slices, then applies the caller's options.
+func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
+ c := &mapstructure.DecoderConfig{
+  Metadata: nil,
+  Result: output,
+  WeaklyTypedInput: true,
+  DecodeHook: mapstructure.ComposeDecodeHookFunc(
+   mapstructure.StringToTimeDurationHookFunc(),
+   mapstructure.StringToSliceHookFunc(","),
+  ),
+ }
+ for _, opt := range opts {
+  opt(c)
+ }
+ return c
+}
+
+// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode
+// functionality using the supplied decoder config.
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+  return err
+ }
+ return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.UnmarshalExact(rawVal, opts...)
+}
+
+func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
+ config := defaultDecoderConfig(rawVal, opts...)
+ config.ErrorUnused = true
+
+ return decode(v.AllSettings(), config)
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+ return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+ if flag == nil {
+  return fmt.Errorf("flag for %q is nil", key)
+ }
+ return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+ flags.VisitAll(func(flag FlagValue) {
+  if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+   // NOTE(review): this return only exits the closure; VisitAll keeps
+   // iterating and the function below returns the literal nil, so a bind
+   // error is recorded in err but never surfaced to the caller.
+   return
+  }
+ })
+ return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+ if flag == nil {
+  return fmt.Errorf("flag for %q is nil", key)
+ }
+ v.pflags[strings.ToLower(key)] = flag
+ return nil
+}
+
+// BindEnv binds a Viper key to a ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// If more arguments are provided, they will represent the env variable names that
+// should bind to this key and will be taken in the specified order.
+// EnvPrefix will be used when set when env name is not provided.
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+
+func (v *Viper) BindEnv(input ...string) error {
+ if len(input) == 0 {
+  return fmt.Errorf("missing key to bind to")
+ }
+
+ key := strings.ToLower(input[0])
+
+ if len(input) == 1 {
+  // no explicit env names: derive one from the key plus the env prefix
+  v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key))
+ } else {
+  // explicit env names are appended in the order the caller gave them
+  v.env[key] = append(v.env[key], input[1:]...)
+ }
+
+ return nil
+}
+
+// MustBindEnv wraps BindEnv in a panic.
+// If there is an error binding an environment variable, MustBindEnv will
+// panic.
+func MustBindEnv(input ...string) { v.MustBindEnv(input...) }
+
+func (v *Viper) MustBindEnv(input ...string) {
+ if err := v.BindEnv(input...); err != nil {
+  panic(fmt.Sprintf("error while binding environment variable: %v", err))
+ }
+}
+
+// find resolves a key to its value, honoring the source priority order.
+//
+// Viper will check to see if an alias exists first.
+// Viper will then check in the following order:
+// flag, env, config file, key/value store.
+// Lastly, if no value was found and flagDefault is true, and if the key
+// corresponds to a flag, the flag's default value is returned.
+//
+// At each priority level, a nested key that is shadowed by a shorter key set
+// at that same level resolves to nil, so lower-priority sources cannot leak
+// values underneath a higher-priority parent.
+//
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
+ var (
+  val interface{}
+  exists bool
+  path = strings.Split(lcaseKey, v.keyDelim)
+  nested = len(path) > 1
+ )
+
+ // compute the path through the nested maps to the nested value
+ if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+  return nil
+ }
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+
+ // Set() override first
+ val = v.searchMap(v.override, path)
+ if val != nil {
+  return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+  return nil
+ }
+
+ // PFlag override next
+ flag, exists := v.pflags[lcaseKey]
+ if exists && flag.HasChanged() {
+  switch flag.ValueType() {
+  case "int", "int8", "int16", "int32", "int64":
+   return cast.ToInt(flag.ValueString())
+  case "bool":
+   return cast.ToBool(flag.ValueString())
+  case "stringSlice", "stringArray":
+   // pflag renders slices as "[a,b,c]"; strip brackets and parse as CSV
+   s := strings.TrimPrefix(flag.ValueString(), "[")
+   s = strings.TrimSuffix(s, "]")
+   res, _ := readAsCSV(s)
+   return res
+  case "intSlice":
+   s := strings.TrimPrefix(flag.ValueString(), "[")
+   s = strings.TrimSuffix(s, "]")
+   res, _ := readAsCSV(s)
+   return cast.ToIntSlice(res)
+  case "stringToString":
+   return stringToStringConv(flag.ValueString())
+  default:
+   return flag.ValueString()
+  }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+  return nil
+ }
+
+ // Env override next
+ if v.automaticEnvApplied {
+  // even if it hasn't been registered, if automaticEnv is used,
+  // check any Get request
+  if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
+   return val
+  }
+  if nested && v.isPathShadowedInAutoEnv(path) != "" {
+   return nil
+  }
+ }
+ envkeys, exists := v.env[lcaseKey]
+ if exists {
+  // a key may be bound to several env var names; first one set wins
+  for _, envkey := range envkeys {
+   if val, ok := v.getEnv(envkey); ok {
+    return val
+   }
+  }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+  return nil
+ }
+
+ // Config file next
+ val = v.searchIndexableWithPathPrefixes(v.config, path)
+ if val != nil {
+  return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+  return nil
+ }
+
+ // K/V store next
+ val = v.searchMap(v.kvstore, path)
+ if val != nil {
+  return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+  return nil
+ }
+
+ // Default next
+ val = v.searchMap(v.defaults, path)
+ if val != nil {
+  return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+  return nil
+ }
+
+ if flagDefault {
+  // last chance: if no value is found and a flag does exist for the key,
+  // get the flag's default value even if the flag's value has not been set.
+  if flag, exists := v.pflags[lcaseKey]; exists {
+   switch flag.ValueType() {
+   case "int", "int8", "int16", "int32", "int64":
+    return cast.ToInt(flag.ValueString())
+   case "bool":
+    return cast.ToBool(flag.ValueString())
+   case "stringSlice", "stringArray":
+    s := strings.TrimPrefix(flag.ValueString(), "[")
+    s = strings.TrimSuffix(s, "]")
+    res, _ := readAsCSV(s)
+    return res
+   case "intSlice":
+    s := strings.TrimPrefix(flag.ValueString(), "[")
+    s = strings.TrimSuffix(s, "]")
+    res, _ := readAsCSV(s)
+    return cast.ToIntSlice(res)
+   case "stringToString":
+    return stringToStringConv(flag.ValueString())
+   default:
+    return flag.ValueString()
+   }
+  }
+  // last item, no need to check shadowing
+ }
+
+ return nil
+}
+
+// readAsCSV parses a single comma-separated record into its fields.
+// An empty input yields an empty (non-nil) slice.
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+  return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79
+// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap
+// A malformed record or a pair without "=" yields nil rather than an error.
+func stringToStringConv(val string) interface{} {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+  return map[string]interface{}{}
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+  return nil
+ }
+ out := make(map[string]interface{}, len(ss))
+ for _, pair := range ss {
+  // split on the first "=" only, so values may themselves contain "="
+  kv := strings.SplitN(pair, "=", 2)
+  if len(kv) != 2 {
+   return nil
+  }
+  out[kv[0]] = kv[1]
+ }
+ return out
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+// Note it calls find with flagDefault=false, so a flag's default value alone
+// does not make a key "set".
+func IsSet(key string) bool { return v.IsSet(key) }
+
+func (v *Viper) IsSet(key string) bool {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, false)
+ return val != nil
+}
+
+// AutomaticEnv makes Viper check if environment variables match any of the existing keys
+// (config, default or flags). If matching env vars are found, they are loaded into Viper.
+func AutomaticEnv() { v.AutomaticEnv() }
+
+func (v *Viper) AutomaticEnv() {
+ v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object
+// Useful for mapping an environmental variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+ v.envKeyReplacer = r
+}
+
+// RegisterAlias creates an alias that provides another accessor for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+
+func (v *Viper) RegisterAlias(alias string, key string) {
+ v.registerAlias(alias, strings.ToLower(key))
+}
+
+// registerAlias records alias -> key, migrating any value already stored
+// under the alias in each layer so it remains reachable via the real key.
+// Self-referential aliases are rejected with a warning.
+func (v *Viper) registerAlias(alias string, key string) {
+ alias = strings.ToLower(alias)
+ if alias != key && alias != v.realKey(key) {
+  _, exists := v.aliases[alias]
+
+  if !exists {
+   // if we alias something that exists in one of the maps to another
+   // name, we'll never be able to get that value using the original
+   // name, so move the config value to the new realkey.
+   if val, ok := v.config[alias]; ok {
+    delete(v.config, alias)
+    v.config[key] = val
+   }
+   if val, ok := v.kvstore[alias]; ok {
+    delete(v.kvstore, alias)
+    v.kvstore[key] = val
+   }
+   if val, ok := v.defaults[alias]; ok {
+    delete(v.defaults, alias)
+    v.defaults[key] = val
+   }
+   if val, ok := v.override[alias]; ok {
+    delete(v.override, alias)
+    v.override[key] = val
+   }
+   v.aliases[alias] = key
+  }
+ } else {
+  v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key))
+ }
+}
+
+// realKey follows the alias chain (recursively) to the underlying real key;
+// a key with no alias entry is returned unchanged.
+func (v *Viper) realKey(key string) string {
+ newkey, exists := v.aliases[key]
+ if exists {
+  v.logger.Debug("key is an alias", "alias", key, "to", newkey)
+
+  return v.realKey(newkey)
+ }
+ return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+
+func (v *Viper) InConfig(key string) bool {
+ lcaseKey := strings.ToLower(key)
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path := strings.Split(lcaseKey, v.keyDelim)
+
+ return v.searchIndexableWithPathPrefixes(v.config, path) != nil
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// Default only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+
+func (v *Viper) SetDefault(key string, value interface{}) {
+ // If alias passed in, then set the proper default
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ // walk/create the nested maps down to the parent of the final path element
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+
+func (v *Viper) Set(key string, value interface{}) {
+ // If alias passed in, then set the proper override
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// ReadInConfig will discover and load the configuration file from disk
+// and key/value stores, searching in one of the defined paths.
+// It replaces the in-memory config layer wholesale on success.
+func ReadInConfig() error { return v.ReadInConfig() }
+
+func (v *Viper) ReadInConfig() error {
+ v.logger.Info("attempting to read in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+  return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+  return UnsupportedConfigError(v.getConfigType())
+ }
+
+ v.logger.Debug("reading file", "file", filename)
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+  return err
+ }
+
+ config := make(map[string]interface{})
+
+ err = v.unmarshalReader(bytes.NewReader(file), config)
+ if err != nil {
+  return err
+ }
+
+ // only swap in the new map once parsing fully succeeded
+ v.config = config
+ return nil
+}
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+
+func (v *Viper) MergeInConfig() error {
+ v.logger.Info("attempting to merge in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+  return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+  return UnsupportedConfigError(v.getConfigType())
+ }
+
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+  return err
+ }
+
+ return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig will read a configuration file, setting existing keys to nil if the
+// key does not exist in the file.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+
+func (v *Viper) ReadConfig(in io.Reader) error {
+ v.config = make(map[string]interface{})
+ return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+
+func (v *Viper) MergeConfig(in io.Reader) error {
+ cfg := make(map[string]interface{})
+ if err := v.unmarshalReader(in, cfg); err != nil {
+  return err
+ }
+ return v.MergeConfigMap(cfg)
+}
+
+// MergeConfigMap merges the configuration from the map given with an existing config.
+// Note that the map given may be modified.
+func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
+
+func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
+ if v.config == nil {
+  v.config = make(map[string]interface{})
+ }
+ insensitiviseMap(cfg)
+ mergeMaps(cfg, v.config, nil)
+ return nil
+}
+
+// WriteConfig writes the current configuration to a file.
+func WriteConfig() error { return v.WriteConfig() }
+
+func (v *Viper) WriteConfig() error {
+ filename, err := v.getConfigFile()
+ if err != nil {
+  return err
+ }
+ // force=true: overwrite an existing file
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfig writes current configuration to file only if the file does not exist.
+func SafeWriteConfig() error { return v.SafeWriteConfig() }
+
+func (v *Viper) SafeWriteConfig() error {
+ if len(v.configPaths) < 1 {
+  return errors.New("missing configuration for 'configPath'")
+ }
+ return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType))
+}
+
+// WriteConfigAs writes current configuration to a given filename.
+func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
+
+func (v *Viper) WriteConfigAs(filename string) error {
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
+
+func (v *Viper) SafeWriteConfigAs(filename string) error {
+ alreadyExists, err := afero.Exists(v.fs, filename)
+ // NOTE(review): if Exists itself errors, this guard is skipped and the
+ // write is attempted anyway (O_EXCL below still prevents clobbering).
+ if alreadyExists && err == nil {
+  return ConfigFileAlreadyExistsError(filename)
+ }
+ return v.writeConfig(filename, false)
+}
+
+// writeConfig marshals the merged settings into filename. The format is taken
+// from the filename extension, falling back to the configured type. When
+// force is false the file is opened with O_EXCL so an existing file is never
+// truncated.
+func (v *Viper) writeConfig(filename string, force bool) error {
+ v.logger.Info("attempting to write configuration to file")
+
+ var configType string
+
+ ext := filepath.Ext(filename)
+ // the Base comparison presumably excludes dotfiles like ".env", where
+ // Ext equals Base — TODO(review) confirm intent
+ if ext != "" && ext != filepath.Base(filename) {
+  configType = ext[1:]
+ } else {
+  configType = v.configType
+ }
+ if configType == "" {
+  return fmt.Errorf("config type could not be determined for %s", filename)
+ }
+
+ if !stringInSlice(configType, SupportedExts) {
+  return UnsupportedConfigError(configType)
+ }
+ if v.config == nil {
+  v.config = make(map[string]interface{})
+ }
+ flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+ if !force {
+  flags |= os.O_EXCL
+ }
+ f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
+ if err != nil {
+  return err
+ }
+ defer f.Close()
+
+ if err := v.marshalWriter(f, configType); err != nil {
+  return err
+ }
+
+ return f.Sync()
+}
+
+// unmarshalReader unmarshals a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ return v.unmarshalReader(in, c)
+}
+
+// unmarshalReader decodes the reader's bytes into c using the decoder
+// registered for the configured format, then lower-cases all keys.
+// An unrecognized format falls through the switch silently, leaving c empty.
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ buf := new(bytes.Buffer)
+ // read error from ReadFrom is deliberately ignored here; decoding below
+ // will operate on whatever was read
+ buf.ReadFrom(in)
+
+ switch format := strings.ToLower(v.getConfigType()); format {
+ case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env":
+  err := v.decoderRegistry.Decode(format, buf.Bytes(), c)
+  if err != nil {
+   return ConfigParseError{err}
+  }
+ }
+
+ insensitiviseMap(c)
+ return nil
+}
+
+// marshalWriter marshals the merged settings (AllSettings) into f in the
+// given format. Unrecognized formats are silently skipped.
+func (v *Viper) marshalWriter(f afero.File, configType string) error {
+ c := v.AllSettings()
+ switch configType {
+ case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env":
+  b, err := v.encoderRegistry.Encode(configType, c)
+  if err != nil {
+   return ConfigMarshalError{err}
+  }
+
+  _, err = f.WriteString(string(b))
+  if err != nil {
+   return ConfigMarshalError{err}
+  }
+ }
+ return nil
+}
+
+// keyExists performs a case-insensitive lookup of k in m, returning the
+// map's original-cased key on a match or "" when absent.
+func keyExists(k string, m map[string]interface{}) string {
+ lk := strings.ToLower(k)
+ for mk := range m {
+  lmk := strings.ToLower(mk)
+  if lmk == lk {
+   return mk
+  }
+ }
+ return ""
+}
+
+// castToMapStringInterface converts go-yaml style interface-keyed maps to
+// string-keyed maps, stringifying each key with fmt.
+func castToMapStringInterface(
+ src map[interface{}]interface{},
+) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+  tgt[fmt.Sprintf("%v", k)] = v
+ }
+ return tgt
+}
+
+// castMapStringSliceToMapInterface widens the value type of an env-binding map.
+func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+  tgt[k] = v
+ }
+ return tgt
+}
+
+// castMapStringToMapInterface widens the value type of an alias map.
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+  tgt[k] = v
+ }
+ return tgt
+}
+
+// castMapFlagToMapInterface widens the value type of a pflag-binding map.
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+  tgt[k] = v
+ }
+ return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nest structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+//
+// src values win: matching nested maps are merged recursively, anything else
+// overwrites the target entry in place.
+func mergeMaps(
+ src, tgt map[string]interface{}, itgt map[interface{}]interface{},
+) {
+ for sk, sv := range src {
+  tk := keyExists(sk, tgt)
+  if tk == "" {
+   v.logger.Trace("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv)
+   tgt[sk] = sv
+   if itgt != nil {
+    itgt[sk] = sv
+   }
+   continue
+  }
+
+  tv, ok := tgt[tk]
+  if !ok {
+   v.logger.Trace("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv)
+   tgt[sk] = sv
+   if itgt != nil {
+    itgt[sk] = sv
+   }
+   continue
+  }
+
+  svType := reflect.TypeOf(sv)
+  tvType := reflect.TypeOf(tv)
+
+  v.logger.Trace(
+   "processing",
+   "key", sk,
+   "st", svType,
+   "tt", tvType,
+   "sv", sv,
+   "tv", tv,
+  )
+
+  switch ttv := tv.(type) {
+  case map[interface{}]interface{}:
+   v.logger.Trace("merging maps (must convert)")
+   tsv, ok := sv.(map[interface{}]interface{})
+   if !ok {
+    v.logger.Error(
+     "Could not cast sv to map[interface{}]interface{}",
+     "key", sk,
+     "st", svType,
+     "tt", tvType,
+     "sv", sv,
+     "tv", tv,
+    )
+    continue
+   }
+
+   // NOTE: despite the names, ssv is derived from the source value and
+   // stv from the target value; recursion stays src -> tgt
+   ssv := castToMapStringInterface(tsv)
+   stv := castToMapStringInterface(ttv)
+   mergeMaps(ssv, stv, ttv)
+  case map[string]interface{}:
+   v.logger.Trace("merging maps")
+   tsv, ok := sv.(map[string]interface{})
+   if !ok {
+    v.logger.Error(
+     "Could not cast sv to map[string]interface{}",
+     "key", sk,
+     "st", svType,
+     "tt", tvType,
+     "sv", sv,
+     "tv", tv,
+    )
+    continue
+   }
+   mergeMaps(tsv, ttv, nil)
+  default:
+   v.logger.Trace("setting value")
+   tgt[tk] = sv
+   if itgt != nil {
+    itgt[tk] = sv
+   }
+  }
+ }
+}
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it in the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+
+func (v *Viper) ReadRemoteConfig() error {
+ return v.getKeyValueConfig()
+}
+
+// WatchRemoteConfig polls the remote provider once for updated configuration.
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+ return v.watchKeyValueConfig()
+}
+
+// WatchRemoteConfigOnChannel starts a background goroutine that applies
+// updates pushed by the remote provider.
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+ return v.watchKeyValueConfigOnChannel()
+}
+
+// getKeyValueConfig retrieves the first found remote configuration.
+// Providers that fail are logged and skipped; the first success wins.
+func (v *Viper) getKeyValueConfig() error {
+ if RemoteConfig == nil {
+  return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+ }
+
+ if len(v.remoteProviders) == 0 {
+  return RemoteConfigError("No Remote Providers")
+ }
+
+ for _, rp := range v.remoteProviders {
+  val, err := v.getRemoteConfig(rp)
+  if err != nil {
+   v.logger.Error(fmt.Errorf("get remote config: %w", err).Error())
+
+   continue
+  }
+
+  v.kvstore = val
+
+  return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// getRemoteConfig fetches and decodes one provider's payload into the kvstore.
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Get(provider)
+ if err != nil {
+  return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// watchKeyValueConfigOnChannel retrieves the first found remote configuration.
+// Note: only the first provider is watched — the loop returns on its first
+// iteration after spawning the watcher goroutine.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+ if len(v.remoteProviders) == 0 {
+  return RemoteConfigError("No Remote Providers")
+ }
+
+ for _, rp := range v.remoteProviders {
+  respc, _ := RemoteConfig.WatchChannel(rp)
+  // Todo: Add quit channel
+  go func(rc <-chan *RemoteResponse) {
+   for {
+    b := <-rc
+    reader := bytes.NewReader(b.Value)
+    v.unmarshalReader(reader, v.kvstore)
+   }
+  }(respc)
+  return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// watchKeyValueConfig retrieves the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+ if len(v.remoteProviders) == 0 {
+  return RemoteConfigError("No Remote Providers")
+ }
+
+ for _, rp := range v.remoteProviders {
+  val, err := v.watchRemoteConfig(rp)
+  if err != nil {
+   v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error())
+
+   continue
+  }
+  v.kvstore = val
+  return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// watchRemoteConfig blocks on the provider's Watch and decodes the result.
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Watch(provider)
+ if err != nil {
+  return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim separator
+func AllKeys() []string { return v.AllKeys() }
+
+func (v *Viper) AllKeys() []string {
+ m := map[string]bool{}
+ // add all paths, by order of descending priority to ensure correct shadowing
+ m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+ m = v.flattenAndMergeMap(m, v.override, "")
+ m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+ m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env))
+ m = v.flattenAndMergeMap(m, v.config, "")
+ m = v.flattenAndMergeMap(m, v.kvstore, "")
+ m = v.flattenAndMergeMap(m, v.defaults, "")
+
+ // convert set of paths to list
+ a := make([]string, 0, len(m))
+ for x := range m {
+ a = append(a, x)
+ }
+ return a
+}
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+//   - each path is merged into a single key string, delimited with v.keyDelim
+//   - if a path is shadowed by an earlier value in the initial shadow map,
+//     it is skipped.
+//
+// The resulting set of paths is merged to the given shadow set at the same time.
+// Keys are lowercased as they are added.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+	if shadow != nil && prefix != "" && shadow[prefix] {
+		// prefix is shadowed => nothing more to flatten
+		return shadow
+	}
+	if shadow == nil {
+		shadow = make(map[string]bool)
+	}
+
+	var m2 map[string]interface{}
+	if prefix != "" {
+		prefix += v.keyDelim
+	}
+	for k, val := range m {
+		fullKey := prefix + k
+		switch val.(type) {
+		case map[string]interface{}:
+			m2 = val.(map[string]interface{})
+		case map[interface{}]interface{}:
+			// normalize interface-keyed maps (e.g. from YAML) to string keys
+			m2 = cast.ToStringMap(val)
+		default:
+			// immediate value: record the full path as a leaf key
+			shadow[strings.ToLower(fullKey)] = true
+			continue
+		}
+		// recursively merge to shadow map
+		shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+	}
+	return shadow
+}
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+// Unlike flattenAndMergeMap, m is already flat (keys contain v.keyDelim).
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+	// scan keys
+outer:
+	for k := range m {
+		path := strings.Split(k, v.keyDelim)
+		// scan intermediate paths: if any ancestor path is already a leaf in
+		// shadow, this key is shadowed and must be skipped
+		var parentKey string
+		for i := 1; i < len(path); i++ {
+			parentKey = strings.Join(path[0:i], v.keyDelim)
+			if shadow[parentKey] {
+				// path is shadowed, continue
+				continue outer
+			}
+		}
+		// add key (lowercased, matching flattenAndMergeMap)
+		shadow[strings.ToLower(k)] = true
+	}
+	return shadow
+}
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+// This package-level variant delegates to the global Viper instance.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+
+// AllSettings merges all settings and returns them as a nested
+// map[string]interface{}, rebuilt key-by-key from AllKeys and Get.
+func (v *Viper) AllSettings() map[string]interface{} {
+	m := map[string]interface{}{}
+	// start from the list of keys, and construct the map one value at a time
+	for _, k := range v.AllKeys() {
+		value := v.Get(k)
+		if value == nil {
+			// should not happen, since AllKeys() returns only keys holding a value,
+			// check just in case anything changes
+			continue
+		}
+		path := strings.Split(k, v.keyDelim)
+		lastKey := strings.ToLower(path[len(path)-1])
+		// deepSearch creates any missing intermediate maps along the path
+		deepestMap := deepSearch(m, path[0:len(path)-1])
+		// set innermost value
+		deepestMap[lastKey] = value
+	}
+	return m
+}
+
+// SetFs sets the filesystem to use to read configuration.
+// This package-level variant delegates to the global Viper instance.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+
+// SetFs sets the afero filesystem used for all config-file I/O.
+func (v *Viper) SetFs(fs afero.Fs) {
+	v.fs = fs
+}
+
+// SetConfigName sets name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+
+// SetConfigName sets the base name of the config file; empty input is ignored.
+// Setting a new name clears any previously resolved/cached config file path.
+func (v *Viper) SetConfigName(in string) {
+	if in != "" {
+		v.configName = in
+		v.configFile = ""
+	}
+}
+
+// SetConfigType sets the type of the configuration returned by the
+// remote source, e.g. "json".
+func SetConfigType(in string) { v.SetConfigType(in) }
+
+// SetConfigType sets the explicit config format; empty input is ignored.
+func (v *Viper) SetConfigType(in string) {
+	if in != "" {
+		v.configType = in
+	}
+}
+
+// SetConfigPermissions sets the permissions for the config file.
+func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
+
+// SetConfigPermissions stores only the Unix permission bits of perm
+// (perm.Perm() masks off any file-type bits).
+func (v *Viper) SetConfigPermissions(perm os.FileMode) {
+	v.configPermissions = perm.Perm()
+}
+
+// IniLoadOptions sets the load options for ini parsing.
+// The returned Option stores the options on the Viper instance it is applied to.
+func IniLoadOptions(in ini.LoadOptions) Option {
+	return optionFunc(func(v *Viper) {
+		v.iniLoadOptions = in
+	})
+}
+
+// getConfigType returns the explicitly-set config type if any; otherwise it
+// derives the type from the config file's extension (without the leading dot).
+// Returns "" when neither is available.
+func (v *Viper) getConfigType() string {
+	if v.configType != "" {
+		return v.configType
+	}
+
+	cf, err := v.getConfigFile()
+	if err != nil {
+		return ""
+	}
+
+	ext := filepath.Ext(cf)
+
+	// filepath.Ext includes the dot, so "x.json" yields ".json"; strip it
+	if len(ext) > 1 {
+		return ext[1:]
+	}
+
+	return ""
+}
+
+// getConfigFile returns the path of the config file, locating it with
+// findConfigFile on first use and caching the result in v.configFile.
+func (v *Viper) getConfigFile() (string, error) {
+	if v.configFile == "" {
+		cf, err := v.findConfigFile()
+		if err != nil {
+			return "", err
+		}
+		v.configFile = cf
+	}
+	return v.configFile, nil
+}
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+
+// DebugTo writes all configuration registries of the global Viper to w.
+func DebugTo(w io.Writer) { v.DebugTo(w) }
+
+// Debug writes all configuration registries to standard output.
+func (v *Viper) Debug() { v.DebugTo(os.Stdout) }
+
+// DebugTo dumps every internal registry (aliases, overrides, flags, env,
+// key/value store, config and defaults) to w in Go syntax (%#v).
+func (v *Viper) DebugTo(w io.Writer) {
+	fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases)
+	fmt.Fprintf(w, "Override:\n%#v\n", v.override)
+	fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags)
+	fmt.Fprintf(w, "Env:\n%#v\n", v.env)
+	fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore)
+	fmt.Fprintf(w, "Config:\n%#v\n", v.config)
+	fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults)
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/viper_go1_15.go b/test/integration/vendor/github.com/spf13/viper/viper_go1_15.go
new file mode 100644
index 000000000..19a771cbd
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/viper_go1_15.go
@@ -0,0 +1,57 @@
+//go:build !go1.16 || !finder
+// +build !go1.16 !finder
+
+package viper
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/afero"
+)
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+// Paths are tried in the order they were added.
+func (v *Viper) findConfigFile() (string, error) {
+	v.logger.Info("searching for config in paths", "paths", v.configPaths)
+
+	for _, cp := range v.configPaths {
+		file := v.searchInPath(cp)
+		if file != "" {
+			return file, nil
+		}
+	}
+	return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+}
+
+// searchInPath looks for a file named v.configName in directory `in`, trying
+// every supported extension in order; if v.configType is set it also accepts
+// a file with no extension. Returns "" when nothing is found.
+func (v *Viper) searchInPath(in string) (filename string) {
+	v.logger.Debug("searching for config in path", "path", in)
+	for _, ext := range SupportedExts {
+		v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext))
+		if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
+			v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext))
+			return filepath.Join(in, v.configName+"."+ext)
+		}
+	}
+
+	// extensionless file is only meaningful when the type was set explicitly
+	if v.configType != "" {
+		if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
+			return filepath.Join(in, v.configName)
+		}
+	}
+
+	return ""
+}
+
+// exists reports whether path exists on fs and is a regular file (not a
+// directory). A non-nil error is returned only for stat failures other
+// than "does not exist".
+func exists(fs afero.Fs, path string) (bool, error) {
+	stat, err := fs.Stat(path)
+	if err == nil {
+		return !stat.IsDir(), nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/viper_go1_16.go b/test/integration/vendor/github.com/spf13/viper/viper_go1_16.go
new file mode 100644
index 000000000..e10172fa3
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/viper_go1_16.go
@@ -0,0 +1,32 @@
+//go:build go1.16 && finder
+// +build go1.16,finder
+
+package viper
+
+import (
+ "fmt"
+
+ "github.com/spf13/afero"
+)
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+// This variant (go1.16 + "finder" build tag) delegates to the io/fs-based
+// finder over an afero adapter instead of stat-ing paths directly.
+func (v *Viper) findConfigFile() (string, error) {
+	finder := finder{
+		paths:            v.configPaths,
+		fileNames:        []string{v.configName},
+		extensions:       SupportedExts,
+		withoutExtension: v.configType != "",
+	}
+
+	file, err := finder.Find(afero.NewIOFS(v.fs))
+	if err != nil {
+		return "", err
+	}
+
+	if file == "" {
+		return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+	}
+
+	return file, nil
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/watch.go b/test/integration/vendor/github.com/spf13/viper/watch.go
new file mode 100644
index 000000000..1ce84eaf8
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/watch.go
@@ -0,0 +1,12 @@
+//go:build darwin || dragonfly || freebsd || openbsd || linux || netbsd || solaris || windows
+// +build darwin dragonfly freebsd openbsd linux netbsd solaris windows
+
+package viper
+
+import "github.com/fsnotify/fsnotify"
+
+// watcher aliases fsnotify.Watcher on platforms where fsnotify is supported
+// (see the build constraints above).
+type watcher = fsnotify.Watcher
+
+// newWatcher returns a new fsnotify file watcher.
+func newWatcher() (*watcher, error) {
+	return fsnotify.NewWatcher()
+}
diff --git a/test/integration/vendor/github.com/spf13/viper/watch_unsupported.go b/test/integration/vendor/github.com/spf13/viper/watch_unsupported.go
new file mode 100644
index 000000000..7e2715377
--- /dev/null
+++ b/test/integration/vendor/github.com/spf13/viper/watch_unsupported.go
@@ -0,0 +1,32 @@
+//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package viper
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+// newWatcher returns a stub watcher together with an error: fsnotify is not
+// supported on this platform (see the build constraints above).
+func newWatcher() (*watcher, error) {
+	return &watcher{}, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// watcher is a no-op stand-in for fsnotify.Watcher on unsupported platforms.
+// The channels mirror fsnotify.Watcher's exported fields but are never fed.
+type watcher struct {
+	Events chan fsnotify.Event
+	Errors chan error
+}
+
+// Close is a no-op.
+func (*watcher) Close() error {
+	return nil
+}
+
+// Add is a no-op.
+func (*watcher) Add(name string) error {
+	return nil
+}
+
+// Remove is a no-op.
+func (*watcher) Remove(name string) error {
+	return nil
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/.codeclimate.yml b/test/integration/vendor/github.com/stretchr/objx/.codeclimate.yml
new file mode 100644
index 000000000..559fa399c
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/.codeclimate.yml
@@ -0,0 +1,21 @@
+engines:
+ gofmt:
+ enabled: true
+ golint:
+ enabled: true
+ govet:
+ enabled: true
+
+exclude_patterns:
+- ".github/"
+- "vendor/"
+- "codegen/"
+- "*.yml"
+- ".*.yml"
+- "*.md"
+- "Gopkg.*"
+- "doc.go"
+- "type_specific_codegen_test.go"
+- "type_specific_codegen.go"
+- ".gitignore"
+- "LICENSE"
diff --git a/test/integration/vendor/github.com/stretchr/objx/.gitignore b/test/integration/vendor/github.com/stretchr/objx/.gitignore
new file mode 100644
index 000000000..ea58090bd
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/.gitignore
@@ -0,0 +1,11 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/test/integration/vendor/github.com/stretchr/objx/LICENSE b/test/integration/vendor/github.com/stretchr/objx/LICENSE
new file mode 100644
index 000000000..44d4d9d5a
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Stretchr, Inc.
+Copyright (c) 2017-2018 objx contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/test/integration/vendor/github.com/stretchr/objx/README.md b/test/integration/vendor/github.com/stretchr/objx/README.md
new file mode 100644
index 000000000..246660b21
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/README.md
@@ -0,0 +1,80 @@
+# Objx
+[](https://travis-ci.org/stretchr/objx)
+[](https://goreportcard.com/report/github.com/stretchr/objx)
+[](https://codeclimate.com/github/stretchr/objx/maintainability)
+[](https://codeclimate.com/github/stretchr/objx/test_coverage)
+[](https://sourcegraph.com/github.com/stretchr/objx)
+[](https://godoc.org/github.com/stretchr/objx)
+
+Objx - Go package for dealing with maps, slices, JSON and other data.
+
+Get started:
+
+- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date)
+- Check out the API Documentation http://godoc.org/github.com/stretchr/objx
+
+## Overview
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc.
+
+### Pattern
+Objx uses a predictable pattern to make accessing data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going:
+
+ m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+ m.Get("places[0].latlng")
+
+Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+ if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+ m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value.
+
+ Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below.
+
+### Reading data
+A simple example of how to use Objx:
+
+ // Use MustFromJSON to make an objx.Map from some JSON
+ m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+ // Get the details
+ name := m.Get("name").Str()
+ age := m.Get("age").Int()
+
+ // Get their nickname (or use their name if they don't have one)
+ nickname := m.Get("nickname").Str(name)
+
+### Ranging
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect:
+
+ m := objx.MustFromJSON(json)
+ for key, value := range m {
+ // Your code...
+ }
+
+## Installation
+To install Objx, use go get:
+
+ go get github.com/stretchr/objx
+
+### Staying up to date
+To update Objx to the latest version, run:
+
+ go get -u github.com/stretchr/objx
+
+### Supported go versions
+We support the latest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment.
+
+## Contributing
+Please feel free to submit issues, fork the repository and send pull requests!
diff --git a/test/integration/vendor/github.com/stretchr/objx/Taskfile.yml b/test/integration/vendor/github.com/stretchr/objx/Taskfile.yml
new file mode 100644
index 000000000..7746f516d
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -0,0 +1,30 @@
+version: '2'
+
+env:
+ GOFLAGS: -mod=vendor
+
+tasks:
+ default:
+ deps: [test]
+
+ lint:
+ desc: Checks code style
+ cmds:
+ - gofmt -d -s *.go
+ - go vet ./...
+ silent: true
+
+ lint-fix:
+ desc: Fixes code style
+ cmds:
+ - gofmt -w -s *.go
+
+ test:
+ desc: Runs go tests
+ cmds:
+ - go test -race ./...
+
+ test-coverage:
+ desc: Runs go tests and calculates test coverage
+ cmds:
+ - go test -race -coverprofile=c.out ./...
diff --git a/test/integration/vendor/github.com/stretchr/objx/accessors.go b/test/integration/vendor/github.com/stretchr/objx/accessors.go
new file mode 100644
index 000000000..4c6045588
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/accessors.go
@@ -0,0 +1,197 @@
+package objx
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+const (
+	// PathSeparator is the character used to separate the elements
+	// of the keypath.
+	//
+	// For example, `location.address.city`
+	PathSeparator string = "."
+
+	// arrayAccesRegexString is the regex used to extract the array number
+	// from the access path, e.g. the "2" in "chapters[2]"
+	arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
+
+	// mapAccessRegexString is the regex used to extract the map key
+	// from the access path
+	mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$`
+)
+
+// arrayAccesRegex is the compiled arrayAccesRegexString (compiled once at package init)
+var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
+
+// mapAccessRegex is the compiled mapAccessRegexString (compiled once at package init)
+var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
+
+// Get gets the value using the specified selector and
+// returns it inside a new Obj object.
+//
+// If it cannot find the value, Get will return a nil
+// value inside an instance of Obj.
+//
+// Get can only operate directly on map[string]interface{} and []interface.
+//
+// Example
+//
+// To access the title of the third chapter of the second book, do:
+//
+//	o.Get("books[1].chapters[2].title")
+func (m Map) Get(selector string) *Value {
+	// read-only access: value argument is nil, isSet is false
+	rawObj := access(m, selector, nil, false)
+	return &Value{data: rawObj}
+}
+
+// Set sets the value using the specified selector and
+// returns the object on which Set was called (allowing chaining).
+//
+// Set can only operate directly on map[string]interface{} and []interface
+//
+// Example
+//
+// To set the title of the third chapter of the second book, do:
+//
+//	o.Set("books[1].chapters[2].title","Time to Go")
+func (m Map) Set(selector string, value interface{}) Map {
+	access(m, selector, value, true)
+	return m
+}
+
+// getIndex returns the index, which is held in s between two brackets.
+// It also returns s without the index part, e.g. name[1] will return (1, name).
+// If no index is found, -1 is returned
+func getIndex(s string) (int, string) {
+	arrayMatches := arrayAccesRegex.FindStringSubmatch(s)
+	if len(arrayMatches) > 0 {
+		// Get the key into the map
+		selector := arrayMatches[1]
+		// Get the index into the array at the key
+		// We know this cannot fail because arrayMatches[2] is an int for sure
+		index, _ := strconv.Atoi(arrayMatches[2])
+		return index, selector
+	}
+	return -1, s
+}
+
+// getKey returns the key which is held in s by two brackets.
+// It also returns the next selector.
+func getKey(s string) (string, string) {
+	// split off the first dot-separated segment; the remainder (if any)
+	// becomes the default next selector
+	selSegs := strings.SplitN(s, PathSeparator, 2)
+	thisSel := selSegs[0]
+	nextSel := ""
+
+	if len(selSegs) > 1 {
+		nextSel = selSegs[1]
+	}
+
+	// bracketed access: when the bracket content is NOT numeric it is a map
+	// key rather than an array index, so re-split around the brackets
+	mapMatches := mapAccessRegex.FindStringSubmatch(s)
+	if len(mapMatches) > 0 {
+		if _, err := strconv.Atoi(mapMatches[2]); err != nil {
+			thisSel = mapMatches[1]
+			nextSel = "[" + mapMatches[2] + "]" + mapMatches[3]
+
+			// selector started with a bracket, e.g. "[key].rest"
+			if thisSel == "" {
+				thisSel = mapMatches[2]
+				nextSel = mapMatches[3]
+			}
+
+			if nextSel == "" {
+				selSegs = []string{"", ""}
+			} else if nextSel[0] == '.' {
+				// drop the leading path separator from the remainder
+				nextSel = nextSel[1:]
+			}
+		}
+	}
+
+	return thisSel, nextSel
+}
+
+// access accesses the object using the selector and performs the
+// appropriate action: a read (isSet == false) returns the addressed value,
+// a write (isSet == true) stores value at the addressed location.
+func access(current interface{}, selector string, value interface{}, isSet bool) interface{} {
+	thisSel, nextSel := getKey(selector)
+
+	// peel every trailing [n] off thisSel; the regex is anchored at the end,
+	// so indexes are collected right-to-left
+	indexes := []int{}
+	for strings.Contains(thisSel, "[") {
+		prevSel := thisSel
+		index := -1
+		index, thisSel = getIndex(thisSel)
+		indexes = append(indexes, index)
+		// no index was stripped => avoid an infinite loop
+		if prevSel == thisSel {
+			break
+		}
+	}
+
+	if curMap, ok := current.(Map); ok {
+		current = map[string]interface{}(curMap)
+	}
+	// get the object in question
+	switch current.(type) {
+	case map[string]interface{}:
+		curMSI := current.(map[string]interface{})
+		// final segment of a write: store and stop descending
+		if nextSel == "" && isSet {
+			curMSI[thisSel] = value
+			return nil
+		}
+
+		_, ok := curMSI[thisSel].(map[string]interface{})
+		if !ok {
+			_, ok = curMSI[thisSel].(Map)
+		}
+
+		// on writes, auto-create missing intermediate maps along the path
+		if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet {
+			curMSI[thisSel] = map[string]interface{}{}
+		}
+
+		current = curMSI[thisSel]
+	default:
+		current = nil
+	}
+
+	// do we need to access the item of an array?
+	if len(indexes) > 0 {
+		// indexes were peeled right-to-left above, so walk them in reverse
+		// to apply the leftmost (outermost) index first
+		num := len(indexes)
+		for num > 0 {
+			num--
+			index := indexes[num]
+			indexes = indexes[:num]
+			if array, ok := interSlice(current); ok {
+				if index < len(array) {
+					current = array[index]
+				} else {
+					// out-of-range index resolves to nil
+					current = nil
+					break
+				}
+			}
+		}
+	}
+
+	// recurse into the remainder of the selector, if any
+	if nextSel != "" {
+		current = access(current, nextSel, value, isSet)
+	}
+	return current
+}
+
+// interSlice normalizes slice into a []interface{}. It returns (nil, false)
+// when slice is not a slice kind at all; otherwise every element is copied
+// out via reflection into a fresh []interface{}.
+func interSlice(slice interface{}) ([]interface{}, bool) {
+	// fast path: already the right type
+	if array, ok := slice.([]interface{}); ok {
+		return array, ok
+	}
+
+	s := reflect.ValueOf(slice)
+	if s.Kind() != reflect.Slice {
+		return nil, false
+	}
+
+	ret := make([]interface{}, s.Len())
+
+	for i := 0; i < s.Len(); i++ {
+		ret[i] = s.Index(i).Interface()
+	}
+
+	return ret, true
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/conversions.go b/test/integration/vendor/github.com/stretchr/objx/conversions.go
new file mode 100644
index 000000000..080aa46e4
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/conversions.go
@@ -0,0 +1,280 @@
+package objx
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+)
+
+// SignatureSeparator is the character that is used to
+// separate the Base64 string from the security signature.
+const SignatureSeparator = "_"
+
+// URLValuesSliceKeySuffix is the character that is used to
+// specify a suffix for slices parsed by URLValues.
+// If the suffix is set to "[i]", then the index of the slice
+// is used in place of i
+// Ex: Suffix "[]" would have the form a[]=b&a[]=c
+// OR Suffix "[i]" would have the form a[0]=b&a[1]=c
+// OR Suffix "" would have the form a=b&a=c
+var urlValuesSliceKeySuffix = "[]"
+
+// Accepted values for SetURLValuesSliceKeySuffix.
+const (
+	URLValuesSliceKeySuffixEmpty = ""
+	URLValuesSliceKeySuffixArray = "[]"
+	URLValuesSliceKeySuffixIndex = "[i]"
+)
+
+// SetURLValuesSliceKeySuffix sets the character that is used to
+// specify a suffix for slices parsed by URLValues.
+// If the suffix is set to "[i]", then the index of the slice
+// is used in place of i
+// Ex: Suffix "[]" would have the form a[]=b&a[]=c
+// OR Suffix "[i]" would have the form a[0]=b&a[1]=c
+// OR Suffix "" would have the form a=b&a=c
+// An error is returned for any other value.
+func SetURLValuesSliceKeySuffix(s string) error {
+	if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex {
+		urlValuesSliceKeySuffix = s
+		return nil
+	}
+
+	return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.")
+}
+
+// JSON converts the contained object to a JSON string
+// representation.
+// Note: values are normalized with cleanUp in place, mutating m.
+func (m Map) JSON() (string, error) {
+	for k, v := range m {
+		m[k] = cleanUp(v)
+	}
+
+	result, err := json.Marshal(m)
+	if err != nil {
+		err = errors.New("objx: JSON encode failed with: " + err.Error())
+	}
+	return string(result), err
+}
+
+// cleanUpInterfaceArray recursively cleans every element of in.
+func cleanUpInterfaceArray(in []interface{}) []interface{} {
+	result := make([]interface{}, len(in))
+	for i, v := range in {
+		result[i] = cleanUp(v)
+	}
+	return result
+}
+
+// cleanUpInterfaceMap converts an interface-keyed map into a Map,
+// stringifying keys with %v and cleaning values recursively.
+func cleanUpInterfaceMap(in map[interface{}]interface{}) Map {
+	result := Map{}
+	for k, v := range in {
+		result[fmt.Sprintf("%v", k)] = cleanUp(v)
+	}
+	return result
+}
+
+// cleanUpStringMap cleans every value of a string-keyed map into a Map.
+func cleanUpStringMap(in map[string]interface{}) Map {
+	result := Map{}
+	for k, v := range in {
+		result[k] = cleanUp(v)
+	}
+	return result
+}
+
+// cleanUpMSIArray cleans a slice of map[string]interface{} into a []Map.
+func cleanUpMSIArray(in []map[string]interface{}) []Map {
+	result := make([]Map, len(in))
+	for i, v := range in {
+		result[i] = cleanUpStringMap(v)
+	}
+	return result
+}
+
+// cleanUpMapArray cleans a slice of Map into a fresh []Map.
+func cleanUpMapArray(in []Map) []Map {
+	result := make([]Map, len(in))
+	for i, v := range in {
+		result[i] = cleanUpStringMap(v)
+	}
+	return result
+}
+
+// cleanUp recursively normalizes v into JSON-marshalable shapes: interface-
+// keyed maps become Maps, and container elements are cleaned in turn.
+// Anything else is returned unchanged.
+func cleanUp(v interface{}) interface{} {
+	switch v := v.(type) {
+	case []interface{}:
+		return cleanUpInterfaceArray(v)
+	case []map[string]interface{}:
+		return cleanUpMSIArray(v)
+	case map[interface{}]interface{}:
+		return cleanUpInterfaceMap(v)
+	case Map:
+		return cleanUpStringMap(v)
+	case []Map:
+		return cleanUpMapArray(v)
+	default:
+		return v
+	}
+}
+
+// MustJSON converts the contained object to a JSON string
+// representation and panics if there is an error.
+func (m Map) MustJSON() string {
+	result, err := m.JSON()
+	if err != nil {
+		panic(err.Error())
+	}
+	return result
+}
+
+// Base64 converts the contained object to a Base64 string
+// representation of the JSON string representation.
+func (m Map) Base64() (string, error) {
+	var buf bytes.Buffer
+
+	jsonData, err := m.JSON()
+	if err != nil {
+		return "", err
+	}
+
+	// writes to an in-memory buffer cannot fail, so errors are discarded
+	encoder := base64.NewEncoder(base64.StdEncoding, &buf)
+	_, _ = encoder.Write([]byte(jsonData))
+	// Close flushes any remaining partially-encoded bytes
+	_ = encoder.Close()
+
+	return buf.String(), nil
+}
+
+// MustBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and panics
+// if there is an error.
+func (m Map) MustBase64() string {
+	result, err := m.Base64()
+	if err != nil {
+		panic(err.Error())
+	}
+	return result
+}
+
+// SignedBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and signs it
+// using the provided key. The result is "<base64>_<signature>"
+// (joined with SignatureSeparator).
+func (m Map) SignedBase64(key string) (string, error) {
+	base64, err := m.Base64()
+	if err != nil {
+		return "", err
+	}
+
+	sig := HashWithKey(base64, key)
+	return base64 + SignatureSeparator + sig, nil
+}
+
+// MustSignedBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and signs it
+// using the provided key and panics if there is an error.
+func (m Map) MustSignedBase64(key string) string {
+	result, err := m.SignedBase64(key)
+	if err != nil {
+		panic(err.Error())
+	}
+	return result
+}
+
+/*
+   URL Query
+   ------------------------------------------------
+*/
+
+// URLValues creates a url.Values object from an Obj. This
+// function requires that the wrapped object be a map[string]interface{}.
+// Nesting and slice encoding are handled by parseURLValues.
+func (m Map) URLValues() url.Values {
+	vals := make(url.Values)
+
+	m.parseURLValues(m, vals, "")
+
+	return vals
+}
+
+// parseURLValues recursively flattens queryMap into vals. key is the
+// accumulated bracketed prefix for the current nesting level ("" at the
+// root; nested keys become key[k]). Slice keys get the configured
+// urlValuesSliceKeySuffix ("", "[]", or per-index "[i]").
+func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) {
+	useSliceIndex := false
+	if urlValuesSliceKeySuffix == "[i]" {
+		useSliceIndex = true
+	}
+
+	for k, v := range queryMap {
+		val := &Value{data: v}
+		switch {
+		case val.IsObjxMap():
+			// nested map: recurse with the bracketed key prefix
+			if key == "" {
+				m.parseURLValues(val.ObjxMap(), vals, k)
+			} else {
+				m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]")
+			}
+		case val.IsObjxMapSlice():
+			// slice of objx.Map: recurse per element
+			sliceKey := k
+			if key != "" {
+				sliceKey = key + "[" + k + "]"
+			}
+
+			if useSliceIndex {
+				for i, sv := range val.MustObjxMapSlice() {
+					sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+					m.parseURLValues(sv, vals, sk)
+				}
+			} else {
+				sliceKey = sliceKey + urlValuesSliceKeySuffix
+				for _, sv := range val.MustObjxMapSlice() {
+					m.parseURLValues(sv, vals, sliceKey)
+				}
+			}
+		case val.IsMSISlice():
+			// slice of map[string]interface{}: wrap each element and recurse
+			sliceKey := k
+			if key != "" {
+				sliceKey = key + "[" + k + "]"
+			}
+
+			if useSliceIndex {
+				for i, sv := range val.MustMSISlice() {
+					sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+					m.parseURLValues(New(sv), vals, sk)
+				}
+			} else {
+				sliceKey = sliceKey + urlValuesSliceKeySuffix
+				for _, sv := range val.MustMSISlice() {
+					m.parseURLValues(New(sv), vals, sliceKey)
+				}
+			}
+		case val.IsStrSlice(), val.IsBoolSlice(),
+			val.IsFloat32Slice(), val.IsFloat64Slice(),
+			val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(),
+			val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice():
+
+			// slice of scalars: stringify the whole slice
+			sliceKey := k
+			if key != "" {
+				sliceKey = key + "[" + k + "]"
+			}
+
+			if useSliceIndex {
+				for i, sv := range val.StringSlice() {
+					sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+					vals.Set(sk, sv)
+				}
+			} else {
+				sliceKey = sliceKey + urlValuesSliceKeySuffix
+				vals[sliceKey] = val.StringSlice()
+			}
+
+		default:
+			// scalar leaf value
+			if key == "" {
+				vals.Set(k, val.String())
+			} else {
+				vals.Set(key+"["+k+"]", val.String())
+			}
+		}
+	}
+}
+
+// URLQuery gets an encoded URL query representing the given
+// Obj. This function requires that the wrapped object be a
+// map[string]interface{}. The returned error is always nil; it is
+// kept in the signature for API stability.
+func (m Map) URLQuery() (string, error) {
+	return m.URLValues().Encode(), nil
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/doc.go b/test/integration/vendor/github.com/stretchr/objx/doc.go
new file mode 100644
index 000000000..6d6af1a83
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/doc.go
@@ -0,0 +1,66 @@
+/*
+Objx - Go package for dealing with maps, slices, JSON and other data.
+
+Overview
+
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
+a powerful `Get` method (among others) that allows you to easily and quickly get
+access to data within the map, without having to worry too much about type assertions,
+missing data, default values etc.
+
+Pattern
+
+Objx uses a predictable pattern to make accessing data from within `map[string]interface{}` easy.
+Call one of the `objx.` functions to create your `objx.Map` to get going:
+
+ m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
+the rest will be optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+ m.Get("places[0].latlng")
+
+Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+ if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+ m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type) then a default value will be returned,
+or you can be explicit about the default value.
+
+ Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating,
+manipulating and selecting that data. You can find out more by exploring the index below.
+
+Reading data
+
+A simple example of how to use Objx:
+
+ // Use MustFromJSON to make an objx.Map from some JSON
+ m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+ // Get the details
+ name := m.Get("name").Str()
+ age := m.Get("age").Int()
+
+ // Get their nickname (or use their name if they don't have one)
+ nickname := m.Get("nickname").Str(name)
+
+Ranging
+
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such.
+For example, to `range` the data, do what you would expect:
+
+ m := objx.MustFromJSON(json)
+ for key, value := range m {
+ // Your code...
+ }
+*/
+package objx
diff --git a/test/integration/vendor/github.com/stretchr/objx/map.go b/test/integration/vendor/github.com/stretchr/objx/map.go
new file mode 100644
index 000000000..a64712a08
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/map.go
@@ -0,0 +1,215 @@
+package objx
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/url"
+ "strings"
+)
+
+// MSIConvertable is an interface that defines methods for converting your
+// custom types to a map[string]interface{} representation.
+type MSIConvertable interface {
+ // MSI gets a map[string]interface{} (msi) representing the
+ // object.
+ MSI() map[string]interface{}
+}
+
+// Map provides extended functionality for working with
+// untyped data, in particular map[string]interface (msi).
+type Map map[string]interface{}
+
+// Value returns the internal value instance
+func (m Map) Value() *Value {
+ return &Value{data: m}
+}
+
+// Nil represents a nil Map.
+var Nil = New(nil)
+
+// New creates a new Map containing the map[string]interface{} in the data argument.
+// If the data argument is not a map[string]interface, New attempts to call the
+// MSI() method on the MSIConvertable interface to create one.
+func New(data interface{}) Map {
+ if _, ok := data.(map[string]interface{}); !ok {
+ if converter, ok := data.(MSIConvertable); ok {
+ data = converter.MSI()
+ } else {
+ return nil
+ }
+ }
+ return Map(data.(map[string]interface{}))
+}
+
+// MSI creates a map[string]interface{} and puts it inside a new Map.
+//
+// The arguments follow a key, value pattern.
+//
+//
+// Returns nil if any key argument is non-string or if there is an odd number of arguments.
+//
+// Example
+//
+// To easily create Maps:
+//
+// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
+//
+//	// creates a Map equivalent to
+// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
+func MSI(keyAndValuePairs ...interface{}) Map {
+ newMap := Map{}
+ keyAndValuePairsLen := len(keyAndValuePairs)
+ if keyAndValuePairsLen%2 != 0 {
+ return nil
+ }
+ for i := 0; i < keyAndValuePairsLen; i = i + 2 {
+ key := keyAndValuePairs[i]
+ value := keyAndValuePairs[i+1]
+
+ // make sure the key is a string
+ keyString, keyStringOK := key.(string)
+ if !keyStringOK {
+ return nil
+ }
+ newMap[keyString] = value
+ }
+ return newMap
+}
+
+// ****** Conversion Constructors
+
+// MustFromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Panics if the JSON is invalid.
+func MustFromJSON(jsonString string) Map {
+ o, err := FromJSON(jsonString)
+ if err != nil {
+ panic("objx: MustFromJSON failed with error: " + err.Error())
+ }
+ return o
+}
+
+// MustFromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with JSON documents with a top-level array
+//
+// Panics if the JSON is invalid.
+func MustFromJSONSlice(jsonString string) []Map {
+ slice, err := FromJSONSlice(jsonString)
+ if err != nil {
+ panic("objx: MustFromJSONSlice failed with error: " + err.Error())
+ }
+ return slice
+}
+
+// FromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Returns an error if the JSON is invalid.
+func FromJSON(jsonString string) (Map, error) {
+ var m Map
+ err := json.Unmarshal([]byte(jsonString), &m)
+ if err != nil {
+ return Nil, err
+ }
+ return m, nil
+}
+
+// FromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with JSON documents with a top-level array
+//
+// Returns an error if the JSON is invalid.
+func FromJSONSlice(jsonString string) ([]Map, error) {
+ var slice []Map
+ err := json.Unmarshal([]byte(jsonString), &slice)
+ if err != nil {
+ return nil, err
+ }
+ return slice, nil
+}
+
+// FromBase64 creates a new Obj containing the data specified
+// in the Base64 string.
+//
+// The string is an encoded JSON string returned by Base64
+func FromBase64(base64String string) (Map, error) {
+ decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
+ decoded, err := ioutil.ReadAll(decoder)
+ if err != nil {
+ return nil, err
+ }
+ return FromJSON(string(decoded))
+}
+
+// MustFromBase64 creates a new Obj containing the data specified
+// in the Base64 string and panics if there is an error.
+//
+// The string is an encoded JSON string returned by Base64
+func MustFromBase64(base64String string) Map {
+ result, err := FromBase64(base64String)
+ if err != nil {
+ panic("objx: MustFromBase64 failed with error: " + err.Error())
+ }
+ return result
+}
+
+// FromSignedBase64 creates a new Obj containing the data specified
+// in the Base64 string.
+//
+// The string is an encoded JSON string returned by SignedBase64
+func FromSignedBase64(base64String, key string) (Map, error) {
+ parts := strings.Split(base64String, SignatureSeparator)
+ if len(parts) != 2 {
+ return nil, errors.New("objx: Signed base64 string is malformed")
+ }
+
+ sig := HashWithKey(parts[0], key)
+ if parts[1] != sig {
+ return nil, errors.New("objx: Signature for base64 data does not match")
+ }
+ return FromBase64(parts[0])
+}
+
+// MustFromSignedBase64 creates a new Obj containing the data specified
+// in the Base64 string and panics if there is an error.
+//
+// The string is an encoded JSON string returned by Base64
+func MustFromSignedBase64(base64String, key string) Map {
+ result, err := FromSignedBase64(base64String, key)
+ if err != nil {
+ panic("objx: MustFromSignedBase64 failed with error: " + err.Error())
+ }
+ return result
+}
+
+// FromURLQuery generates a new Obj by parsing the specified
+// query.
+//
+// For queries with multiple values, the first value is selected.
+func FromURLQuery(query string) (Map, error) {
+ vals, err := url.ParseQuery(query)
+ if err != nil {
+ return nil, err
+ }
+ m := Map{}
+ for k, vals := range vals {
+ m[k] = vals[0]
+ }
+ return m, nil
+}
+
+// MustFromURLQuery generates a new Obj by parsing the specified
+// query.
+//
+// For queries with multiple values, the first value is selected.
+//
+// Panics if it encounters an error
+func MustFromURLQuery(query string) Map {
+ o, err := FromURLQuery(query)
+ if err != nil {
+ panic("objx: MustFromURLQuery failed with error: " + err.Error())
+ }
+ return o
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/mutations.go b/test/integration/vendor/github.com/stretchr/objx/mutations.go
new file mode 100644
index 000000000..c3400a3f7
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/mutations.go
@@ -0,0 +1,77 @@
+package objx
+
+// Exclude returns a new Map with the keys in the specified []string
+// excluded.
+func (m Map) Exclude(exclude []string) Map {
+ excluded := make(Map)
+ for k, v := range m {
+ if !contains(exclude, k) {
+ excluded[k] = v
+ }
+ }
+ return excluded
+}
+
+// Copy creates a shallow copy of the Obj.
+func (m Map) Copy() Map {
+ copied := Map{}
+ for k, v := range m {
+ copied[k] = v
+ }
+ return copied
+}
+
+// Merge blends the specified map with a copy of this map and returns the result.
+//
+// Keys that appear in both will be selected from the specified map.
+// This method requires that the wrapped object be a map[string]interface{}
+func (m Map) Merge(merge Map) Map {
+ return m.Copy().MergeHere(merge)
+}
+
+// MergeHere blends the specified map with this map and returns the current map.
+//
+// Keys that appear in both will be selected from the specified map. The original map
+// will be modified. This method requires that
+// the wrapped object be a map[string]interface{}
+func (m Map) MergeHere(merge Map) Map {
+ for k, v := range merge {
+ m[k] = v
+ }
+ return m
+}
+
+// Transform builds a new Obj giving the transformer a chance
+// to change the keys and values as it goes. This method requires that
+// the wrapped object be a map[string]interface{}
+func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
+ newMap := Map{}
+ for k, v := range m {
+ modifiedKey, modifiedVal := transformer(k, v)
+ newMap[modifiedKey] = modifiedVal
+ }
+ return newMap
+}
+
+// TransformKeys builds a new map using the specified key mapping.
+//
+// Unspecified keys will be unaltered.
+// This method requires that the wrapped object be a map[string]interface{}
+func (m Map) TransformKeys(mapping map[string]string) Map {
+ return m.Transform(func(key string, value interface{}) (string, interface{}) {
+ if newKey, ok := mapping[key]; ok {
+ return newKey, value
+ }
+ return key, value
+ })
+}
+
+// Checks if a string slice contains a string
+func contains(s []string, e string) bool {
+ for _, a := range s {
+ if a == e {
+ return true
+ }
+ }
+ return false
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/security.go b/test/integration/vendor/github.com/stretchr/objx/security.go
new file mode 100644
index 000000000..692be8e2a
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/security.go
@@ -0,0 +1,12 @@
+package objx
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+)
+
+// HashWithKey hashes the specified string using the security key
+func HashWithKey(data, key string) string {
+ d := sha1.Sum([]byte(data + ":" + key))
+ return hex.EncodeToString(d[:])
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/tests.go b/test/integration/vendor/github.com/stretchr/objx/tests.go
new file mode 100644
index 000000000..d9e0b479a
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/tests.go
@@ -0,0 +1,17 @@
+package objx
+
+// Has gets whether there is something at the specified selector
+// or not.
+//
+// If m is nil, Has will always return false.
+func (m Map) Has(selector string) bool {
+ if m == nil {
+ return false
+ }
+ return !m.Get(selector).IsNil()
+}
+
+// IsNil gets whether the data is nil or not.
+func (v *Value) IsNil() bool {
+ return v == nil || v.data == nil
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/type_specific.go b/test/integration/vendor/github.com/stretchr/objx/type_specific.go
new file mode 100644
index 000000000..80f88d9fa
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/type_specific.go
@@ -0,0 +1,346 @@
+package objx
+
+/*
+ MSI (map[string]interface{} and []map[string]interface{})
+*/
+
+// MSI gets the value as a map[string]interface{}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} {
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ if s, ok := v.data.(Map); ok {
+ return map[string]interface{}(s)
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustMSI gets the value as a map[string]interface{}.
+//
+// Panics if the object is not a map[string]interface{}.
+func (v *Value) MustMSI() map[string]interface{} {
+ if s, ok := v.data.(Map); ok {
+ return map[string]interface{}(s)
+ }
+ return v.data.(map[string]interface{})
+}
+
+// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
+// value or nil if the value is not a []map[string]interface{}.
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
+ if s, ok := v.data.([]map[string]interface{}); ok {
+ return s
+ }
+
+ s := v.ObjxMapSlice()
+ if s == nil {
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+ }
+
+ result := make([]map[string]interface{}, len(s))
+ for i := range s {
+ result[i] = s[i].Value().MSI()
+ }
+ return result
+}
+
+// MustMSISlice gets the value as a []map[string]interface{}.
+//
+// Panics if the object is not a []map[string]interface{}.
+func (v *Value) MustMSISlice() []map[string]interface{} {
+ if s := v.MSISlice(); s != nil {
+ return s
+ }
+
+ return v.data.([]map[string]interface{})
+}
+
+// IsMSI gets whether the object contained is a map[string]interface{} or not.
+func (v *Value) IsMSI() bool {
+ _, ok := v.data.(map[string]interface{})
+ if !ok {
+ _, ok = v.data.(Map)
+ }
+ return ok
+}
+
+// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
+func (v *Value) IsMSISlice() bool {
+ _, ok := v.data.([]map[string]interface{})
+ if !ok {
+ _, ok = v.data.([]Map)
+ if !ok {
+ s, ok := v.data.([]interface{})
+ if ok {
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ case map[string]interface{}:
+ default:
+ return false
+ }
+ }
+ return true
+ }
+ }
+ }
+ return ok
+}
+
+// EachMSI calls the specified callback for each object
+// in the []map[string]interface{}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
+ for index, val := range v.MustMSISlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereMSI uses the specified decider function to select items
+// from the []map[string]interface{}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
+ var selected []map[string]interface{}
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupMSI uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]map[string]interface{}.
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
+ groups := make(map[string][]map[string]interface{})
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]map[string]interface{}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceMSI uses the specified function to replace each map[string]interface{}s
+// by iterating each item. The data in the returned result will be a
+// []map[string]interface{} containing the replaced items.
+func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
+ arr := v.MustMSISlice()
+ replaced := make([]map[string]interface{}, len(arr))
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectMSI uses the specified collector function to collect a value
+// for each of the map[string]interface{}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
+ arr := v.MustMSISlice()
+ collected := make([]interface{}, len(arr))
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ ObjxMap ((Map) and [](Map))
+*/
+
+// ObjxMap gets the value as a (Map), returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
+ if s, ok := v.data.((Map)); ok {
+ return s
+ }
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return New(nil)
+}
+
+// MustObjxMap gets the value as a (Map).
+//
+// Panics if the object is not a (Map).
+func (v *Value) MustObjxMap() Map {
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ return v.data.((Map))
+}
+
+// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
+// value or nil if the value is not a [](Map).
+func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
+ if s, ok := v.data.([]Map); ok {
+ return s
+ }
+
+ if s, ok := v.data.([]map[string]interface{}); ok {
+ result := make([]Map, len(s))
+ for i := range s {
+ result[i] = s[i]
+ }
+ return result
+ }
+
+ s, ok := v.data.([]interface{})
+ if !ok {
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+ }
+
+ result := make([]Map, len(s))
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ result[i] = s[i].(Map)
+ case map[string]interface{}:
+ result[i] = New(s[i])
+ default:
+ return nil
+ }
+ }
+ return result
+}
+
+// MustObjxMapSlice gets the value as a [](Map).
+//
+// Panics if the object is not a [](Map).
+func (v *Value) MustObjxMapSlice() [](Map) {
+ if s := v.ObjxMapSlice(); s != nil {
+ return s
+ }
+ return v.data.([](Map))
+}
+
+// IsObjxMap gets whether the object contained is a (Map) or not.
+func (v *Value) IsObjxMap() bool {
+ _, ok := v.data.((Map))
+ if !ok {
+ _, ok = v.data.(map[string]interface{})
+ }
+ return ok
+}
+
+// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
+func (v *Value) IsObjxMapSlice() bool {
+ _, ok := v.data.([](Map))
+ if !ok {
+ _, ok = v.data.([]map[string]interface{})
+ if !ok {
+ s, ok := v.data.([]interface{})
+ if ok {
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ case map[string]interface{}:
+ default:
+ return false
+ }
+ }
+ return true
+ }
+ }
+ }
+
+ return ok
+}
+
+// EachObjxMap calls the specified callback for each object
+// in the [](Map).
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
+ for index, val := range v.MustObjxMapSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereObjxMap uses the specified decider function to select items
+// from the [](Map). The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
+ var selected [](Map)
+ v.EachObjxMap(func(index int, val Map) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupObjxMap uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][](Map).
+func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
+ groups := make(map[string][](Map))
+ v.EachObjxMap(func(index int, val Map) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([](Map), 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceObjxMap uses the specified function to replace each (Map)s
+// by iterating each item. The data in the returned result will be a
+// [](Map) containing the replaced items.
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
+ arr := v.MustObjxMapSlice()
+ replaced := make([](Map), len(arr))
+ v.EachObjxMap(func(index int, val Map) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectObjxMap uses the specified collector function to collect a value
+// for each of the (Map)s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
+ arr := v.MustObjxMapSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachObjxMap(func(index int, val Map) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/type_specific_codegen.go b/test/integration/vendor/github.com/stretchr/objx/type_specific_codegen.go
new file mode 100644
index 000000000..45850456e
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/type_specific_codegen.go
@@ -0,0 +1,2261 @@
+package objx
+
+/*
+ Inter (interface{} and []interface{})
+*/
+
+// Inter gets the value as a interface{}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Inter(optionalDefault ...interface{}) interface{} {
+ if s, ok := v.data.(interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInter gets the value as a interface{}.
+//
+// Panics if the object is not a interface{}.
+func (v *Value) MustInter() interface{} {
+ return v.data.(interface{})
+}
+
+// InterSlice gets the value as a []interface{}, returns the optionalDefault
+// value or nil if the value is not a []interface{}.
+func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} {
+ if s, ok := v.data.([]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInterSlice gets the value as a []interface{}.
+//
+// Panics if the object is not a []interface{}.
+func (v *Value) MustInterSlice() []interface{} {
+ return v.data.([]interface{})
+}
+
+// IsInter gets whether the object contained is a interface{} or not.
+func (v *Value) IsInter() bool {
+ _, ok := v.data.(interface{})
+ return ok
+}
+
+// IsInterSlice gets whether the object contained is a []interface{} or not.
+func (v *Value) IsInterSlice() bool {
+ _, ok := v.data.([]interface{})
+ return ok
+}
+
+// EachInter calls the specified callback for each object
+// in the []interface{}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInter(callback func(int, interface{}) bool) *Value {
+ for index, val := range v.MustInterSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereInter uses the specified decider function to select items
+// from the []interface{}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value {
+ var selected []interface{}
+ v.EachInter(func(index int, val interface{}) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupInter uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]interface{}.
+func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value {
+ groups := make(map[string][]interface{})
+ v.EachInter(func(index int, val interface{}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]interface{}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceInter uses the specified function to replace each interface{}s
+// by iterating each item. The data in the returned result will be a
+// []interface{} containing the replaced items.
+func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value {
+ arr := v.MustInterSlice()
+ replaced := make([]interface{}, len(arr))
+ v.EachInter(func(index int, val interface{}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectInter uses the specified collector function to collect a value
+// for each of the interface{}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value {
+ arr := v.MustInterSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachInter(func(index int, val interface{}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Bool (bool and []bool)
+*/
+
+// Bool gets the value as a bool, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Bool(optionalDefault ...bool) bool {
+ if s, ok := v.data.(bool); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return false
+}
+
+// MustBool gets the value as a bool.
+//
+// Panics if the object is not a bool.
+func (v *Value) MustBool() bool {
+ return v.data.(bool)
+}
+
+// BoolSlice gets the value as a []bool, returns the optionalDefault
+// value or nil if the value is not a []bool.
+func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool {
+ if s, ok := v.data.([]bool); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustBoolSlice gets the value as a []bool.
+//
+// Panics if the object is not a []bool.
+func (v *Value) MustBoolSlice() []bool {
+ return v.data.([]bool)
+}
+
+// IsBool gets whether the object contained is a bool or not.
+func (v *Value) IsBool() bool {
+ _, ok := v.data.(bool)
+ return ok
+}
+
+// IsBoolSlice gets whether the object contained is a []bool or not.
+func (v *Value) IsBoolSlice() bool {
+ _, ok := v.data.([]bool)
+ return ok
+}
+
+// EachBool calls the specified callback for each object
+// in the []bool.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachBool(callback func(int, bool) bool) *Value {
+ for index, val := range v.MustBoolSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereBool uses the specified decider function to select items
+// from the []bool. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereBool(decider func(int, bool) bool) *Value {
+ var selected []bool
+ v.EachBool(func(index int, val bool) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupBool uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]bool.
+func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
+ groups := make(map[string][]bool)
+ v.EachBool(func(index int, val bool) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]bool, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceBool uses the specified function to replace each bools
+// by iterating each item. The data in the returned result will be a
+// []bool containing the replaced items.
+func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value {
+ arr := v.MustBoolSlice()
+ replaced := make([]bool, len(arr))
+ v.EachBool(func(index int, val bool) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectBool uses the specified collector function to collect a value
+// for each of the bools in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
+ arr := v.MustBoolSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachBool(func(index int, val bool) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Str (string and []string)
+*/
+
+// Str gets the value as a string, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Str(optionalDefault ...string) string {
+ if s, ok := v.data.(string); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return ""
+}
+
+// MustStr gets the value as a string.
+//
+// Panics if the object is not a string.
+func (v *Value) MustStr() string {
+ return v.data.(string)
+}
+
+// StrSlice gets the value as a []string, returns the optionalDefault
+// value or nil if the value is not a []string.
+func (v *Value) StrSlice(optionalDefault ...[]string) []string {
+ if s, ok := v.data.([]string); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustStrSlice gets the value as a []string.
+//
+// Panics if the object is not a []string.
+func (v *Value) MustStrSlice() []string {
+ return v.data.([]string)
+}
+
+// IsStr gets whether the object contained is a string or not.
+func (v *Value) IsStr() bool {
+ _, ok := v.data.(string)
+ return ok
+}
+
+// IsStrSlice gets whether the object contained is a []string or not.
+func (v *Value) IsStrSlice() bool {
+ _, ok := v.data.([]string)
+ return ok
+}
+
+// EachStr calls the specified callback for each object
+// in the []string.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachStr(callback func(int, string) bool) *Value {
+ for index, val := range v.MustStrSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereStr uses the specified decider function to select items
+// from the []string. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereStr(decider func(int, string) bool) *Value {
+ var selected []string
+ v.EachStr(func(index int, val string) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupStr uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]string.
+func (v *Value) GroupStr(grouper func(int, string) string) *Value {
+ groups := make(map[string][]string)
+ v.EachStr(func(index int, val string) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]string, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceStr uses the specified function to replace each strings
+// by iterating each item. The data in the returned result will be a
+// []string containing the replaced items.
+func (v *Value) ReplaceStr(replacer func(int, string) string) *Value {
+ arr := v.MustStrSlice()
+ replaced := make([]string, len(arr))
+ v.EachStr(func(index int, val string) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectStr uses the specified collector function to collect a value
+// for each of the strings in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
+ arr := v.MustStrSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachStr(func(index int, val string) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+	Int (int and []int)
+*/
+
+// Int gets the value as a int, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int(optionalDefault ...int) int {
+	if s, ok := v.data.(int); ok {
+		return s
+	}
+	// JSON numbers decode into interface{} as float64; accept one here
+	// only when the conversion to int is lossless.
+	if s, ok := v.data.(float64); ok {
+		if float64(int(s)) == s {
+			return int(s)
+		}
+	}
+	// Variadic parameter emulates an optional argument; only a single
+	// supplied default is honoured.
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustInt gets the value as a int.
+//
+// Panics if the object is not a int.
+func (v *Value) MustInt() int {
+	// A float64 that converts to int without loss (e.g. a JSON number)
+	// is accepted before falling through to the panicking assertion.
+	if s, ok := v.data.(float64); ok {
+		if float64(int(s)) == s {
+			return int(s)
+		}
+	}
+	return v.data.(int)
+}
+
+// IntSlice gets the value as a []int, returns the optionalDefault
+// value or nil if the value is not a []int.
+func (v *Value) IntSlice(optionalDefault ...[]int) []int {
+	if s, ok := v.data.([]int); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustIntSlice gets the value as a []int.
+//
+// Panics if the object is not a []int.
+func (v *Value) MustIntSlice() []int {
+	return v.data.([]int)
+}
+
+// IsInt gets whether the object contained is a int or not.
+func (v *Value) IsInt() bool {
+	_, ok := v.data.(int)
+	return ok
+}
+
+// IsIntSlice gets whether the object contained is a []int or not.
+func (v *Value) IsIntSlice() bool {
+	_, ok := v.data.([]int)
+	return ok
+}
+
+// EachInt calls the specified callback for each object
+// in the []int.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt(callback func(int, int) bool) *Value {
+	for index, val := range v.MustIntSlice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereInt uses the specified decider function to select items
+// from the []int. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereInt(decider func(int, int) bool) *Value {
+	var selected []int
+	v.EachInt(func(index int, val int) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupInt uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int.
+func (v *Value) GroupInt(grouper func(int, int) string) *Value {
+	groups := make(map[string][]int)
+	v.EachInt(func(index int, val int) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]int, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceInt uses the specified function to replace each ints
+// by iterating each item. The data in the returned result will be a
+// []int containing the replaced items.
+func (v *Value) ReplaceInt(replacer func(int, int) int) *Value {
+	arr := v.MustIntSlice()
+	replaced := make([]int, len(arr))
+	v.EachInt(func(index int, val int) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectInt uses the specified collector function to collect a value
+// for each of the ints in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
+	arr := v.MustIntSlice()
+	collected := make([]interface{}, len(arr))
+	v.EachInt(func(index int, val int) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Int8 (int8 and []int8)
+*/
+
+// Int8 gets the value as a int8, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int8(optionalDefault ...int8) int8 {
+	if s, ok := v.data.(int8); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustInt8 gets the value as a int8.
+//
+// Panics if the object is not a int8.
+func (v *Value) MustInt8() int8 {
+	return v.data.(int8)
+}
+
+// Int8Slice gets the value as a []int8, returns the optionalDefault
+// value or nil if the value is not a []int8.
+func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 {
+	if s, ok := v.data.([]int8); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustInt8Slice gets the value as a []int8.
+//
+// Panics if the object is not a []int8.
+func (v *Value) MustInt8Slice() []int8 {
+	return v.data.([]int8)
+}
+
+// IsInt8 gets whether the object contained is a int8 or not.
+func (v *Value) IsInt8() bool {
+	_, ok := v.data.(int8)
+	return ok
+}
+
+// IsInt8Slice gets whether the object contained is a []int8 or not.
+func (v *Value) IsInt8Slice() bool {
+	_, ok := v.data.([]int8)
+	return ok
+}
+
+// EachInt8 calls the specified callback for each object
+// in the []int8.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt8(callback func(int, int8) bool) *Value {
+	for index, val := range v.MustInt8Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereInt8 uses the specified decider function to select items
+// from the []int8. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereInt8(decider func(int, int8) bool) *Value {
+	var selected []int8
+	v.EachInt8(func(index int, val int8) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupInt8 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int8.
+func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
+	groups := make(map[string][]int8)
+	v.EachInt8(func(index int, val int8) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]int8, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceInt8 uses the specified function to replace each int8s
+// by iterating each item. The data in the returned result will be a
+// []int8 containing the replaced items.
+func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value {
+	arr := v.MustInt8Slice()
+	replaced := make([]int8, len(arr))
+	v.EachInt8(func(index int, val int8) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectInt8 uses the specified collector function to collect a value
+// for each of the int8s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
+	arr := v.MustInt8Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachInt8(func(index int, val int8) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Int16 (int16 and []int16)
+*/
+
+// Int16 gets the value as a int16, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int16(optionalDefault ...int16) int16 {
+	if s, ok := v.data.(int16); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustInt16 gets the value as a int16.
+//
+// Panics if the object is not a int16.
+func (v *Value) MustInt16() int16 {
+	return v.data.(int16)
+}
+
+// Int16Slice gets the value as a []int16, returns the optionalDefault
+// value or nil if the value is not a []int16.
+func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 {
+	if s, ok := v.data.([]int16); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustInt16Slice gets the value as a []int16.
+//
+// Panics if the object is not a []int16.
+func (v *Value) MustInt16Slice() []int16 {
+	return v.data.([]int16)
+}
+
+// IsInt16 gets whether the object contained is a int16 or not.
+func (v *Value) IsInt16() bool {
+	_, ok := v.data.(int16)
+	return ok
+}
+
+// IsInt16Slice gets whether the object contained is a []int16 or not.
+func (v *Value) IsInt16Slice() bool {
+	_, ok := v.data.([]int16)
+	return ok
+}
+
+// EachInt16 calls the specified callback for each object
+// in the []int16.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt16(callback func(int, int16) bool) *Value {
+	for index, val := range v.MustInt16Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereInt16 uses the specified decider function to select items
+// from the []int16. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereInt16(decider func(int, int16) bool) *Value {
+	var selected []int16
+	v.EachInt16(func(index int, val int16) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupInt16 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int16.
+func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
+	groups := make(map[string][]int16)
+	v.EachInt16(func(index int, val int16) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]int16, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceInt16 uses the specified function to replace each int16s
+// by iterating each item. The data in the returned result will be a
+// []int16 containing the replaced items.
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value {
+	arr := v.MustInt16Slice()
+	replaced := make([]int16, len(arr))
+	v.EachInt16(func(index int, val int16) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectInt16 uses the specified collector function to collect a value
+// for each of the int16s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
+	arr := v.MustInt16Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachInt16(func(index int, val int16) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Int32 (int32 and []int32)
+*/
+
+// Int32 gets the value as a int32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int32(optionalDefault ...int32) int32 {
+	if s, ok := v.data.(int32); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustInt32 gets the value as a int32.
+//
+// Panics if the object is not a int32.
+func (v *Value) MustInt32() int32 {
+	return v.data.(int32)
+}
+
+// Int32Slice gets the value as a []int32, returns the optionalDefault
+// value or nil if the value is not a []int32.
+func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 {
+	if s, ok := v.data.([]int32); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustInt32Slice gets the value as a []int32.
+//
+// Panics if the object is not a []int32.
+func (v *Value) MustInt32Slice() []int32 {
+	return v.data.([]int32)
+}
+
+// IsInt32 gets whether the object contained is a int32 or not.
+func (v *Value) IsInt32() bool {
+	_, ok := v.data.(int32)
+	return ok
+}
+
+// IsInt32Slice gets whether the object contained is a []int32 or not.
+func (v *Value) IsInt32Slice() bool {
+	_, ok := v.data.([]int32)
+	return ok
+}
+
+// EachInt32 calls the specified callback for each object
+// in the []int32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt32(callback func(int, int32) bool) *Value {
+	for index, val := range v.MustInt32Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereInt32 uses the specified decider function to select items
+// from the []int32. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereInt32(decider func(int, int32) bool) *Value {
+	var selected []int32
+	v.EachInt32(func(index int, val int32) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupInt32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int32.
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
+	groups := make(map[string][]int32)
+	v.EachInt32(func(index int, val int32) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]int32, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceInt32 uses the specified function to replace each int32s
+// by iterating each item. The data in the returned result will be a
+// []int32 containing the replaced items.
+func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value {
+	arr := v.MustInt32Slice()
+	replaced := make([]int32, len(arr))
+	v.EachInt32(func(index int, val int32) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectInt32 uses the specified collector function to collect a value
+// for each of the int32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
+	arr := v.MustInt32Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachInt32(func(index int, val int32) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Int64 (int64 and []int64)
+*/
+
+// Int64 gets the value as a int64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int64(optionalDefault ...int64) int64 {
+	if s, ok := v.data.(int64); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustInt64 gets the value as a int64.
+//
+// Panics if the object is not a int64.
+func (v *Value) MustInt64() int64 {
+	return v.data.(int64)
+}
+
+// Int64Slice gets the value as a []int64, returns the optionalDefault
+// value or nil if the value is not a []int64.
+func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 {
+	if s, ok := v.data.([]int64); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustInt64Slice gets the value as a []int64.
+//
+// Panics if the object is not a []int64.
+func (v *Value) MustInt64Slice() []int64 {
+	return v.data.([]int64)
+}
+
+// IsInt64 gets whether the object contained is a int64 or not.
+func (v *Value) IsInt64() bool {
+	_, ok := v.data.(int64)
+	return ok
+}
+
+// IsInt64Slice gets whether the object contained is a []int64 or not.
+func (v *Value) IsInt64Slice() bool {
+	_, ok := v.data.([]int64)
+	return ok
+}
+
+// EachInt64 calls the specified callback for each object
+// in the []int64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt64(callback func(int, int64) bool) *Value {
+	for index, val := range v.MustInt64Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereInt64 uses the specified decider function to select items
+// from the []int64. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value {
+	var selected []int64
+	v.EachInt64(func(index int, val int64) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupInt64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int64.
+func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
+	groups := make(map[string][]int64)
+	v.EachInt64(func(index int, val int64) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]int64, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceInt64 uses the specified function to replace each int64s
+// by iterating each item. The data in the returned result will be a
+// []int64 containing the replaced items.
+func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value {
+	arr := v.MustInt64Slice()
+	replaced := make([]int64, len(arr))
+	v.EachInt64(func(index int, val int64) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectInt64 uses the specified collector function to collect a value
+// for each of the int64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
+	arr := v.MustInt64Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachInt64(func(index int, val int64) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Uint (uint and []uint)
+*/
+
+// Uint gets the value as a uint, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint(optionalDefault ...uint) uint {
+	if s, ok := v.data.(uint); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustUint gets the value as a uint.
+//
+// Panics if the object is not a uint.
+func (v *Value) MustUint() uint {
+	return v.data.(uint)
+}
+
+// UintSlice gets the value as a []uint, returns the optionalDefault
+// value or nil if the value is not a []uint.
+func (v *Value) UintSlice(optionalDefault ...[]uint) []uint {
+	if s, ok := v.data.([]uint); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustUintSlice gets the value as a []uint.
+//
+// Panics if the object is not a []uint.
+func (v *Value) MustUintSlice() []uint {
+	return v.data.([]uint)
+}
+
+// IsUint gets whether the object contained is a uint or not.
+func (v *Value) IsUint() bool {
+	_, ok := v.data.(uint)
+	return ok
+}
+
+// IsUintSlice gets whether the object contained is a []uint or not.
+func (v *Value) IsUintSlice() bool {
+	_, ok := v.data.([]uint)
+	return ok
+}
+
+// EachUint calls the specified callback for each object
+// in the []uint.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint(callback func(int, uint) bool) *Value {
+	for index, val := range v.MustUintSlice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereUint uses the specified decider function to select items
+// from the []uint. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereUint(decider func(int, uint) bool) *Value {
+	var selected []uint
+	v.EachUint(func(index int, val uint) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupUint uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint.
+func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
+	groups := make(map[string][]uint)
+	v.EachUint(func(index int, val uint) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]uint, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceUint uses the specified function to replace each uints
+// by iterating each item. The data in the returned result will be a
+// []uint containing the replaced items.
+func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value {
+	arr := v.MustUintSlice()
+	replaced := make([]uint, len(arr))
+	v.EachUint(func(index int, val uint) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectUint uses the specified collector function to collect a value
+// for each of the uints in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
+	arr := v.MustUintSlice()
+	collected := make([]interface{}, len(arr))
+	v.EachUint(func(index int, val uint) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Uint8 (uint8 and []uint8)
+*/
+
+// Uint8 gets the value as a uint8, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint8(optionalDefault ...uint8) uint8 {
+	if s, ok := v.data.(uint8); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustUint8 gets the value as a uint8.
+//
+// Panics if the object is not a uint8.
+func (v *Value) MustUint8() uint8 {
+	return v.data.(uint8)
+}
+
+// Uint8Slice gets the value as a []uint8, returns the optionalDefault
+// value or nil if the value is not a []uint8.
+func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 {
+	if s, ok := v.data.([]uint8); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustUint8Slice gets the value as a []uint8.
+//
+// Panics if the object is not a []uint8.
+func (v *Value) MustUint8Slice() []uint8 {
+	return v.data.([]uint8)
+}
+
+// IsUint8 gets whether the object contained is a uint8 or not.
+func (v *Value) IsUint8() bool {
+	_, ok := v.data.(uint8)
+	return ok
+}
+
+// IsUint8Slice gets whether the object contained is a []uint8 or not.
+func (v *Value) IsUint8Slice() bool {
+	_, ok := v.data.([]uint8)
+	return ok
+}
+
+// EachUint8 calls the specified callback for each object
+// in the []uint8.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value {
+	for index, val := range v.MustUint8Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereUint8 uses the specified decider function to select items
+// from the []uint8. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value {
+	var selected []uint8
+	v.EachUint8(func(index int, val uint8) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupUint8 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint8.
+func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
+	groups := make(map[string][]uint8)
+	v.EachUint8(func(index int, val uint8) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]uint8, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceUint8 uses the specified function to replace each uint8s
+// by iterating each item. The data in the returned result will be a
+// []uint8 containing the replaced items.
+func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value {
+	arr := v.MustUint8Slice()
+	replaced := make([]uint8, len(arr))
+	v.EachUint8(func(index int, val uint8) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectUint8 uses the specified collector function to collect a value
+// for each of the uint8s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
+	arr := v.MustUint8Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachUint8(func(index int, val uint8) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Uint16 (uint16 and []uint16)
+*/
+
+// Uint16 gets the value as a uint16, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint16(optionalDefault ...uint16) uint16 {
+	if s, ok := v.data.(uint16); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustUint16 gets the value as a uint16.
+//
+// Panics if the object is not a uint16.
+func (v *Value) MustUint16() uint16 {
+	return v.data.(uint16)
+}
+
+// Uint16Slice gets the value as a []uint16, returns the optionalDefault
+// value or nil if the value is not a []uint16.
+func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 {
+	if s, ok := v.data.([]uint16); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustUint16Slice gets the value as a []uint16.
+//
+// Panics if the object is not a []uint16.
+func (v *Value) MustUint16Slice() []uint16 {
+	return v.data.([]uint16)
+}
+
+// IsUint16 gets whether the object contained is a uint16 or not.
+func (v *Value) IsUint16() bool {
+	_, ok := v.data.(uint16)
+	return ok
+}
+
+// IsUint16Slice gets whether the object contained is a []uint16 or not.
+func (v *Value) IsUint16Slice() bool {
+	_, ok := v.data.([]uint16)
+	return ok
+}
+
+// EachUint16 calls the specified callback for each object
+// in the []uint16.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint16(callback func(int, uint16) bool) *Value {
+	for index, val := range v.MustUint16Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereUint16 uses the specified decider function to select items
+// from the []uint16. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value {
+	var selected []uint16
+	v.EachUint16(func(index int, val uint16) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupUint16 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint16.
+func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
+	groups := make(map[string][]uint16)
+	v.EachUint16(func(index int, val uint16) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]uint16, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceUint16 uses the specified function to replace each uint16s
+// by iterating each item. The data in the returned result will be a
+// []uint16 containing the replaced items.
+func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value {
+	arr := v.MustUint16Slice()
+	replaced := make([]uint16, len(arr))
+	v.EachUint16(func(index int, val uint16) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectUint16 uses the specified collector function to collect a value
+// for each of the uint16s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
+	arr := v.MustUint16Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachUint16(func(index int, val uint16) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Uint32 (uint32 and []uint32)
+*/
+
+// Uint32 gets the value as a uint32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint32(optionalDefault ...uint32) uint32 {
+	if s, ok := v.data.(uint32); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustUint32 gets the value as a uint32.
+//
+// Panics if the object is not a uint32.
+func (v *Value) MustUint32() uint32 {
+	return v.data.(uint32)
+}
+
+// Uint32Slice gets the value as a []uint32, returns the optionalDefault
+// value or nil if the value is not a []uint32.
+func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 {
+	if s, ok := v.data.([]uint32); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustUint32Slice gets the value as a []uint32.
+//
+// Panics if the object is not a []uint32.
+func (v *Value) MustUint32Slice() []uint32 {
+	return v.data.([]uint32)
+}
+
+// IsUint32 gets whether the object contained is a uint32 or not.
+func (v *Value) IsUint32() bool {
+	_, ok := v.data.(uint32)
+	return ok
+}
+
+// IsUint32Slice gets whether the object contained is a []uint32 or not.
+func (v *Value) IsUint32Slice() bool {
+	_, ok := v.data.([]uint32)
+	return ok
+}
+
+// EachUint32 calls the specified callback for each object
+// in the []uint32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint32(callback func(int, uint32) bool) *Value {
+	for index, val := range v.MustUint32Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereUint32 uses the specified decider function to select items
+// from the []uint32. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value {
+	var selected []uint32
+	v.EachUint32(func(index int, val uint32) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupUint32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint32.
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
+	groups := make(map[string][]uint32)
+	v.EachUint32(func(index int, val uint32) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]uint32, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceUint32 uses the specified function to replace each uint32s
+// by iterating each item. The data in the returned result will be a
+// []uint32 containing the replaced items.
+func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value {
+	arr := v.MustUint32Slice()
+	replaced := make([]uint32, len(arr))
+	v.EachUint32(func(index int, val uint32) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectUint32 uses the specified collector function to collect a value
+// for each of the uint32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
+	arr := v.MustUint32Slice()
+	collected := make([]interface{}, len(arr))
+	v.EachUint32(func(index int, val uint32) bool {
+		collected[index] = collector(index, val)
+		return true
+	})
+	return &Value{data: collected}
+}
+
+/*
+	Uint64 (uint64 and []uint64)
+*/
+
+// Uint64 gets the value as a uint64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint64(optionalDefault ...uint64) uint64 {
+	if s, ok := v.data.(uint64); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return 0
+}
+
+// MustUint64 gets the value as a uint64.
+//
+// Panics if the object is not a uint64.
+func (v *Value) MustUint64() uint64 {
+	return v.data.(uint64)
+}
+
+// Uint64Slice gets the value as a []uint64, returns the optionalDefault
+// value or nil if the value is not a []uint64.
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 {
+	if s, ok := v.data.([]uint64); ok {
+		return s
+	}
+	if len(optionalDefault) == 1 {
+		return optionalDefault[0]
+	}
+	return nil
+}
+
+// MustUint64Slice gets the value as a []uint64.
+//
+// Panics if the object is not a []uint64.
+func (v *Value) MustUint64Slice() []uint64 {
+	return v.data.([]uint64)
+}
+
+// IsUint64 gets whether the object contained is a uint64 or not.
+func (v *Value) IsUint64() bool {
+	_, ok := v.data.(uint64)
+	return ok
+}
+
+// IsUint64Slice gets whether the object contained is a []uint64 or not.
+func (v *Value) IsUint64Slice() bool {
+	_, ok := v.data.([]uint64)
+	return ok
+}
+
+// EachUint64 calls the specified callback for each object
+// in the []uint64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint64(callback func(int, uint64) bool) *Value {
+	for index, val := range v.MustUint64Slice() {
+		// A false return from the callback stops the iteration early.
+		carryon := callback(index, val)
+		if !carryon {
+			break
+		}
+	}
+	return v
+}
+
+// WhereUint64 uses the specified decider function to select items
+// from the []uint64. The object contained in the result will contain
+// only the selected items.
+//
+// NOTE(review): the implementation appends val when the decider returns
+// false, so the result actually holds the items the decider rejects —
+// the opposite of the sentence above. The same inversion appears for
+// every generated type in this file; confirm against upstream
+// github.com/stretchr/objx before changing either doc or behavior.
+func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value {
+	var selected []uint64
+	v.EachUint64(func(index int, val uint64) bool {
+		shouldSelect := decider(index, val)
+		if !shouldSelect {
+			selected = append(selected, val)
+		}
+		return true
+	})
+	return &Value{data: selected}
+}
+
+// GroupUint64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint64.
+func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
+	groups := make(map[string][]uint64)
+	v.EachUint64(func(index int, val uint64) bool {
+		group := grouper(index, val)
+		if _, ok := groups[group]; !ok {
+			groups[group] = make([]uint64, 0)
+		}
+		groups[group] = append(groups[group], val)
+		return true
+	})
+	return &Value{data: groups}
+}
+
+// ReplaceUint64 uses the specified function to replace each uint64s
+// by iterating each item. The data in the returned result will be a
+// []uint64 containing the replaced items.
+func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value {
+	arr := v.MustUint64Slice()
+	replaced := make([]uint64, len(arr))
+	v.EachUint64(func(index int, val uint64) bool {
+		replaced[index] = replacer(index, val)
+		return true
+	})
+	return &Value{data: replaced}
+}
+
+// CollectUint64 uses the specified collector function to collect a value
+// for each of the uint64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
+ arr := v.MustUint64Slice()
+ collected := make([]interface{}, len(arr))
+ v.EachUint64(func(index int, val uint64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Uintptr (uintptr and []uintptr)
+*/
+
+// Uintptr gets the value as a uintptr, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr {
+ if s, ok := v.data.(uintptr); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUintptr gets the value as a uintptr.
+//
+// Panics if the object is not a uintptr.
+func (v *Value) MustUintptr() uintptr {
+ return v.data.(uintptr)
+}
+
+// UintptrSlice gets the value as a []uintptr, returns the optionalDefault
+// value or nil if the value is not a []uintptr.
+func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr {
+ if s, ok := v.data.([]uintptr); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUintptrSlice gets the value as a []uintptr.
+//
+// Panics if the object is not a []uintptr.
+func (v *Value) MustUintptrSlice() []uintptr {
+ return v.data.([]uintptr)
+}
+
+// IsUintptr gets whether the object contained is a uintptr or not.
+func (v *Value) IsUintptr() bool {
+ _, ok := v.data.(uintptr)
+ return ok
+}
+
+// IsUintptrSlice gets whether the object contained is a []uintptr or not.
+func (v *Value) IsUintptrSlice() bool {
+ _, ok := v.data.([]uintptr)
+ return ok
+}
+
+// EachUintptr calls the specified callback for each object
+// in the []uintptr.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value {
+ for index, val := range v.MustUintptrSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereUintptr uses the specified decider function to select items
+// from the []uintptr. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value {
+ var selected []uintptr
+ v.EachUintptr(func(index int, val uintptr) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupUintptr uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uintptr.
+func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
+ groups := make(map[string][]uintptr)
+ v.EachUintptr(func(index int, val uintptr) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uintptr, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceUintptr uses the specified function to replace each uintptrs
+// by iterating each item. The data in the returned result will be a
+// []uintptr containing the replaced items.
+func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value {
+ arr := v.MustUintptrSlice()
+ replaced := make([]uintptr, len(arr))
+ v.EachUintptr(func(index int, val uintptr) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectUintptr uses the specified collector function to collect a value
+// for each of the uintptrs in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value {
+ arr := v.MustUintptrSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachUintptr(func(index int, val uintptr) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Float32 (float32 and []float32)
+*/
+
+// Float32 gets the value as a float32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Float32(optionalDefault ...float32) float32 {
+ if s, ok := v.data.(float32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustFloat32 gets the value as a float32.
+//
+// Panics if the object is not a float32.
+func (v *Value) MustFloat32() float32 {
+ return v.data.(float32)
+}
+
+// Float32Slice gets the value as a []float32, returns the optionalDefault
+// value or nil if the value is not a []float32.
+func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 {
+ if s, ok := v.data.([]float32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustFloat32Slice gets the value as a []float32.
+//
+// Panics if the object is not a []float32.
+func (v *Value) MustFloat32Slice() []float32 {
+ return v.data.([]float32)
+}
+
+// IsFloat32 gets whether the object contained is a float32 or not.
+func (v *Value) IsFloat32() bool {
+ _, ok := v.data.(float32)
+ return ok
+}
+
+// IsFloat32Slice gets whether the object contained is a []float32 or not.
+func (v *Value) IsFloat32Slice() bool {
+ _, ok := v.data.([]float32)
+ return ok
+}
+
+// EachFloat32 calls the specified callback for each object
+// in the []float32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value {
+ for index, val := range v.MustFloat32Slice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereFloat32 uses the specified decider function to select items
+// from the []float32. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value {
+ var selected []float32
+ v.EachFloat32(func(index int, val float32) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupFloat32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]float32.
+func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
+ groups := make(map[string][]float32)
+ v.EachFloat32(func(index int, val float32) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]float32, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceFloat32 uses the specified function to replace each float32s
+// by iterating each item. The data in the returned result will be a
+// []float32 containing the replaced items.
+func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value {
+ arr := v.MustFloat32Slice()
+ replaced := make([]float32, len(arr))
+ v.EachFloat32(func(index int, val float32) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectFloat32 uses the specified collector function to collect a value
+// for each of the float32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value {
+ arr := v.MustFloat32Slice()
+ collected := make([]interface{}, len(arr))
+ v.EachFloat32(func(index int, val float32) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Float64 (float64 and []float64)
+*/
+
+// Float64 gets the value as a float64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Float64(optionalDefault ...float64) float64 {
+ if s, ok := v.data.(float64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustFloat64 gets the value as a float64.
+//
+// Panics if the object is not a float64.
+func (v *Value) MustFloat64() float64 {
+ return v.data.(float64)
+}
+
+// Float64Slice gets the value as a []float64, returns the optionalDefault
+// value or nil if the value is not a []float64.
+func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 {
+ if s, ok := v.data.([]float64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustFloat64Slice gets the value as a []float64.
+//
+// Panics if the object is not a []float64.
+func (v *Value) MustFloat64Slice() []float64 {
+ return v.data.([]float64)
+}
+
+// IsFloat64 gets whether the object contained is a float64 or not.
+func (v *Value) IsFloat64() bool {
+ _, ok := v.data.(float64)
+ return ok
+}
+
+// IsFloat64Slice gets whether the object contained is a []float64 or not.
+func (v *Value) IsFloat64Slice() bool {
+ _, ok := v.data.([]float64)
+ return ok
+}
+
+// EachFloat64 calls the specified callback for each object
+// in the []float64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachFloat64(callback func(int, float64) bool) *Value {
+ for index, val := range v.MustFloat64Slice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereFloat64 uses the specified decider function to select items
+// from the []float64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value {
+ var selected []float64
+ v.EachFloat64(func(index int, val float64) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupFloat64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]float64.
+func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
+ groups := make(map[string][]float64)
+ v.EachFloat64(func(index int, val float64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]float64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceFloat64 uses the specified function to replace each float64s
+// by iterating each item. The data in the returned result will be a
+// []float64 containing the replaced items.
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value {
+ arr := v.MustFloat64Slice()
+ replaced := make([]float64, len(arr))
+ v.EachFloat64(func(index int, val float64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectFloat64 uses the specified collector function to collect a value
+// for each of the float64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value {
+ arr := v.MustFloat64Slice()
+ collected := make([]interface{}, len(arr))
+ v.EachFloat64(func(index int, val float64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Complex64 (complex64 and []complex64)
+*/
+
+// Complex64 gets the value as a complex64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Complex64(optionalDefault ...complex64) complex64 {
+ if s, ok := v.data.(complex64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustComplex64 gets the value as a complex64.
+//
+// Panics if the object is not a complex64.
+func (v *Value) MustComplex64() complex64 {
+ return v.data.(complex64)
+}
+
+// Complex64Slice gets the value as a []complex64, returns the optionalDefault
+// value or nil if the value is not a []complex64.
+func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 {
+ if s, ok := v.data.([]complex64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustComplex64Slice gets the value as a []complex64.
+//
+// Panics if the object is not a []complex64.
+func (v *Value) MustComplex64Slice() []complex64 {
+ return v.data.([]complex64)
+}
+
+// IsComplex64 gets whether the object contained is a complex64 or not.
+func (v *Value) IsComplex64() bool {
+ _, ok := v.data.(complex64)
+ return ok
+}
+
+// IsComplex64Slice gets whether the object contained is a []complex64 or not.
+func (v *Value) IsComplex64Slice() bool {
+ _, ok := v.data.([]complex64)
+ return ok
+}
+
+// EachComplex64 calls the specified callback for each object
+// in the []complex64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value {
+ for index, val := range v.MustComplex64Slice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereComplex64 uses the specified decider function to select items
+// from the []complex64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value {
+ var selected []complex64
+ v.EachComplex64(func(index int, val complex64) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupComplex64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]complex64.
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
+ groups := make(map[string][]complex64)
+ v.EachComplex64(func(index int, val complex64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]complex64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceComplex64 uses the specified function to replace each complex64s
+// by iterating each item. The data in the returned result will be a
+// []complex64 containing the replaced items.
+func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value {
+ arr := v.MustComplex64Slice()
+ replaced := make([]complex64, len(arr))
+ v.EachComplex64(func(index int, val complex64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectComplex64 uses the specified collector function to collect a value
+// for each of the complex64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value {
+ arr := v.MustComplex64Slice()
+ collected := make([]interface{}, len(arr))
+ v.EachComplex64(func(index int, val complex64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ Complex128 (complex128 and []complex128)
+*/
+
+// Complex128 gets the value as a complex128, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Complex128(optionalDefault ...complex128) complex128 {
+ if s, ok := v.data.(complex128); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustComplex128 gets the value as a complex128.
+//
+// Panics if the object is not a complex128.
+func (v *Value) MustComplex128() complex128 {
+ return v.data.(complex128)
+}
+
+// Complex128Slice gets the value as a []complex128, returns the optionalDefault
+// value or nil if the value is not a []complex128.
+func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 {
+ if s, ok := v.data.([]complex128); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustComplex128Slice gets the value as a []complex128.
+//
+// Panics if the object is not a []complex128.
+func (v *Value) MustComplex128Slice() []complex128 {
+ return v.data.([]complex128)
+}
+
+// IsComplex128 gets whether the object contained is a complex128 or not.
+func (v *Value) IsComplex128() bool {
+ _, ok := v.data.(complex128)
+ return ok
+}
+
+// IsComplex128Slice gets whether the object contained is a []complex128 or not.
+func (v *Value) IsComplex128Slice() bool {
+ _, ok := v.data.([]complex128)
+ return ok
+}
+
+// EachComplex128 calls the specified callback for each object
+// in the []complex128.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value {
+ for index, val := range v.MustComplex128Slice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereComplex128 uses the specified decider function to select items
+// from the []complex128. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value {
+ var selected []complex128
+ v.EachComplex128(func(index int, val complex128) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupComplex128 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]complex128.
+func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
+ groups := make(map[string][]complex128)
+ v.EachComplex128(func(index int, val complex128) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]complex128, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceComplex128 uses the specified function to replace each complex128s
+// by iterating each item. The data in the returned result will be a
+// []complex128 containing the replaced items.
+func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value {
+ arr := v.MustComplex128Slice()
+ replaced := make([]complex128, len(arr))
+ v.EachComplex128(func(index int, val complex128) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectComplex128 uses the specified collector function to collect a value
+// for each of the complex128s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value {
+ arr := v.MustComplex128Slice()
+ collected := make([]interface{}, len(arr))
+ v.EachComplex128(func(index int, val complex128) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
diff --git a/test/integration/vendor/github.com/stretchr/objx/value.go b/test/integration/vendor/github.com/stretchr/objx/value.go
new file mode 100644
index 000000000..4e5f9b77e
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/objx/value.go
@@ -0,0 +1,159 @@
+package objx
+
+import (
+ "fmt"
+ "strconv"
+)
+
// Value provides methods for extracting interface{} data in various
// types. The zero Value holds nil data; all typed accessors are
// defined on *Value.
type Value struct {
	// data contains the raw data being managed by this Value.
	// It is never mutated by the accessor methods, which only
	// type-assert against it.
	data interface{}
}
+
+// Data returns the raw data contained by this Value
+func (v *Value) Data() interface{} {
+ return v.data
+}
+
// String returns the value always as a string.
//
// Nil becomes the empty string; strings are returned as-is; bools and
// the numeric types listed below are formatted with strconv. Any other
// type (including uintptr and the complex types, which have typed
// accessors but no case here) falls through to a %#v Go-syntax dump.
func (v *Value) String() string {
	// The Is* predicates are mutually exclusive type checks on the
	// underlying data, so at most one case matches.
	switch {
	case v.IsNil():
		return ""
	case v.IsStr():
		return v.Str()
	case v.IsBool():
		return strconv.FormatBool(v.Bool())
	case v.IsFloat32():
		return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32)
	case v.IsFloat64():
		return strconv.FormatFloat(v.Float64(), 'f', -1, 64)
	case v.IsInt():
		return strconv.FormatInt(int64(v.Int()), 10)
	case v.IsInt8():
		return strconv.FormatInt(int64(v.Int8()), 10)
	case v.IsInt16():
		return strconv.FormatInt(int64(v.Int16()), 10)
	case v.IsInt32():
		return strconv.FormatInt(int64(v.Int32()), 10)
	case v.IsInt64():
		return strconv.FormatInt(v.Int64(), 10)
	case v.IsUint():
		return strconv.FormatUint(uint64(v.Uint()), 10)
	case v.IsUint8():
		return strconv.FormatUint(uint64(v.Uint8()), 10)
	case v.IsUint16():
		return strconv.FormatUint(uint64(v.Uint16()), 10)
	case v.IsUint32():
		return strconv.FormatUint(uint64(v.Uint32()), 10)
	case v.IsUint64():
		return strconv.FormatUint(v.Uint64(), 10)
	}
	// Fallback for any type not handled above.
	return fmt.Sprintf("%#v", v.Data())
}
+
// StringSlice returns the value always as a []string.
//
// Supported underlying types are []string, []bool, the []float and
// signed/unsigned []int families; each element is formatted with
// strconv. For any other type, optionalDefault (if exactly one is
// given) is returned, otherwise an empty (non-nil) []string.
// NOTE(review): []uintptr and the complex slice types have typed
// accessors elsewhere but are not converted here — they hit the
// default path.
func (v *Value) StringSlice(optionalDefault ...[]string) []string {
	// The Is*Slice predicates are mutually exclusive type checks,
	// so at most one case matches.
	switch {
	case v.IsStrSlice():
		return v.MustStrSlice()
	case v.IsBoolSlice():
		slice := v.MustBoolSlice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatBool(iv)
		}
		return vals
	case v.IsFloat32Slice():
		slice := v.MustFloat32Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32)
		}
		return vals
	case v.IsFloat64Slice():
		slice := v.MustFloat64Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatFloat(iv, 'f', -1, 64)
		}
		return vals
	case v.IsIntSlice():
		slice := v.MustIntSlice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatInt(int64(iv), 10)
		}
		return vals
	case v.IsInt8Slice():
		slice := v.MustInt8Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatInt(int64(iv), 10)
		}
		return vals
	case v.IsInt16Slice():
		slice := v.MustInt16Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatInt(int64(iv), 10)
		}
		return vals
	case v.IsInt32Slice():
		slice := v.MustInt32Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatInt(int64(iv), 10)
		}
		return vals
	case v.IsInt64Slice():
		slice := v.MustInt64Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatInt(iv, 10)
		}
		return vals
	case v.IsUintSlice():
		slice := v.MustUintSlice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatUint(uint64(iv), 10)
		}
		return vals
	case v.IsUint8Slice():
		slice := v.MustUint8Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatUint(uint64(iv), 10)
		}
		return vals
	case v.IsUint16Slice():
		slice := v.MustUint16Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatUint(uint64(iv), 10)
		}
		return vals
	case v.IsUint32Slice():
		slice := v.MustUint32Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatUint(uint64(iv), 10)
		}
		return vals
	case v.IsUint64Slice():
		slice := v.MustUint64Slice()
		vals := make([]string, len(slice))
		for i, iv := range slice {
			vals[i] = strconv.FormatUint(iv, 10)
		}
		return vals
	}
	// Unsupported underlying type: honour the caller-supplied default,
	// if exactly one was given.
	if len(optionalDefault) == 1 {
		return optionalDefault[0]
	}

	return []string{}
}
diff --git a/test/integration/vendor/github.com/stretchr/testify/mock/doc.go b/test/integration/vendor/github.com/stretchr/testify/mock/doc.go
new file mode 100644
index 000000000..7324128ef
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/testify/mock/doc.go
@@ -0,0 +1,44 @@
+// Package mock provides a system by which it is possible to mock your objects
+// and verify calls are happening as expected.
+//
+// Example Usage
+//
+// The mock package provides an object, Mock, that tracks activity on another object. It is usually
+// embedded into a test object as shown below:
+//
+// type MyTestObject struct {
+// // add a Mock object instance
+// mock.Mock
+//
+// // other fields go here as normal
+// }
+//
+// When implementing the methods of an interface, you wire your functions up
+// to call the Mock.Called(args...) method, and return the appropriate values.
+//
+// For example, to mock a method that saves the name and age of a person and returns
+// the year of their birth or an error, you might write this:
+//
+// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
+// args := o.Called(firstname, lastname, age)
+// return args.Int(0), args.Error(1)
+// }
+//
+// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
+// index position. Given this argument list:
+//
+// (12, true, "Something")
+//
+// You could read them out strongly typed like this:
+//
+// args.Int(0)
+// args.Bool(1)
+// args.String(2)
+//
+// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
+//
+// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
+//
+// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those
+// cases you should check for nil first.
+package mock
diff --git a/test/integration/vendor/github.com/stretchr/testify/mock/mock.go b/test/integration/vendor/github.com/stretchr/testify/mock/mock.go
new file mode 100644
index 000000000..f0af8246c
--- /dev/null
+++ b/test/integration/vendor/github.com/stretchr/testify/mock/mock.go
@@ -0,0 +1,1098 @@
+package mock
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Logf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ FailNow()
+}
+
+/*
+ Call
+*/
+
+// Call represents a method call and is used for setting expectations,
+// as well as recording activity.
+type Call struct {
+ Parent *Mock
+
+ // The name of the method that was or will be called.
+ Method string
+
+ // Holds the arguments of the method.
+ Arguments Arguments
+
+ // Holds the arguments that should be returned when
+ // this method is called.
+ ReturnArguments Arguments
+
+ // Holds the caller info for the On() call
+ callerInfo []string
+
+ // The number of times to return the return arguments when setting
+ // expectations. 0 means to always return the value.
+ Repeatability int
+
+ // Amount of times this call has been called
+ totalCalls int
+
+ // Call to this method can be optional
+ optional bool
+
+ // Holds a channel that will be used to block the Return until it either
+ // receives a message or is closed. nil means it returns immediately.
+ WaitFor <-chan time.Time
+
+ waitTime time.Duration
+
+ // Holds a handler used to manipulate arguments content that are passed by
+ // reference. It's useful when mocking methods such as unmarshalers or
+ // decoders.
+ RunFn func(Arguments)
+
+ // PanicMsg holds msg to be used to mock panic on the function call
+ // if the PanicMsg is set to a non nil string the function call will panic
+ // irrespective of other settings
+ PanicMsg *string
+
+ // Calls which must be satisfied before this call can be
+ requires []*Call
+}
+
+func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call {
+ return &Call{
+ Parent: parent,
+ Method: methodName,
+ Arguments: methodArguments,
+ ReturnArguments: make([]interface{}, 0),
+ callerInfo: callerInfo,
+ Repeatability: 0,
+ WaitFor: nil,
+ RunFn: nil,
+ PanicMsg: nil,
+ }
+}
+
+func (c *Call) lock() {
+ c.Parent.mutex.Lock()
+}
+
+func (c *Call) unlock() {
+ c.Parent.mutex.Unlock()
+}
+
+// Return specifies the return arguments for the expectation.
+//
+// Mock.On("DoSomething").Return(errors.New("failed"))
+func (c *Call) Return(returnArguments ...interface{}) *Call {
+ c.lock()
+ defer c.unlock()
+
+ c.ReturnArguments = returnArguments
+
+ return c
+}
+
+// Panic specifies if the function call should fail and the panic message
+//
+// Mock.On("DoSomething").Panic("test panic")
+func (c *Call) Panic(msg string) *Call {
+ c.lock()
+ defer c.unlock()
+
+ c.PanicMsg = &msg
+
+ return c
+}
+
+// Once indicates that the mock should only return the value once.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
+func (c *Call) Once() *Call {
+ return c.Times(1)
+}
+
+// Twice indicates that the mock should only return the value twice.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
+func (c *Call) Twice() *Call {
+ return c.Times(2)
+}
+
+// Times indicates that the mock should only return the indicated number
+// of times.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
+func (c *Call) Times(i int) *Call {
+ c.lock()
+ defer c.unlock()
+ c.Repeatability = i
+ return c
+}
+
+// WaitUntil sets the channel that will block the mock's return until it's closed
+// or a message is received.
+//
+// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second))
+func (c *Call) WaitUntil(w <-chan time.Time) *Call {
+ c.lock()
+ defer c.unlock()
+ c.WaitFor = w
+ return c
+}
+
+// After sets how long to block until the call returns
+//
+// Mock.On("MyMethod", arg1, arg2).After(time.Second)
+func (c *Call) After(d time.Duration) *Call {
+ c.lock()
+ defer c.unlock()
+ c.waitTime = d
+ return c
+}
+
+// Run sets a handler to be called before returning. It can be used when
+// mocking a method (such as an unmarshaler) that takes a pointer to a struct and
+// sets properties in such struct
+//
+// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) {
+// arg := args.Get(0).(*map[string]interface{})
+// arg["foo"] = "bar"
+// })
+func (c *Call) Run(fn func(args Arguments)) *Call {
+ c.lock()
+ defer c.unlock()
+ c.RunFn = fn
+ return c
+}
+
+// Maybe allows the method call to be optional. Not calling an optional method
+// will not cause an error while asserting expectations
+func (c *Call) Maybe() *Call {
+ c.lock()
+ defer c.unlock()
+ c.optional = true
+ return c
+}
+
+// On chains a new expectation description onto the mocked interface. This
+// allows syntax like.
+//
+// Mock.
+// On("MyMethod", 1).Return(nil).
+// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error"))
+//go:noinline
+func (c *Call) On(methodName string, arguments ...interface{}) *Call {
+ return c.Parent.On(methodName, arguments...)
+}
+
+// Unset removes a mock handler from being called.
+// test.On("func", mock.Anything).Unset()
+func (c *Call) Unset() *Call {
+ var unlockOnce sync.Once
+
+ for _, arg := range c.Arguments {
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
+ panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
+ }
+ }
+
+ c.lock()
+ defer unlockOnce.Do(c.unlock)
+
+ foundMatchingCall := false
+
+ for i, call := range c.Parent.ExpectedCalls {
+ if call.Method == c.Method {
+ _, diffCount := call.Arguments.Diff(c.Arguments)
+ if diffCount == 0 {
+ foundMatchingCall = true
+ // Remove from ExpectedCalls
+ c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...)
+ }
+ }
+ }
+
+ if !foundMatchingCall {
+ unlockOnce.Do(c.unlock)
+ c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n",
+ callString(c.Method, c.Arguments, true),
+ )
+ }
+
+ return c
+}
+
+// NotBefore indicates that the mock should only be called after the referenced
+// calls have been called as expected. The referenced calls may be from the
+// same mock instance and/or other mock instances.
+//
+// Mock.On("Do").Return(nil).NotBefore(
+// Mock.On("Init").Return(nil)
+// )
+func (c *Call) NotBefore(calls ...*Call) *Call {
+ c.lock()
+ defer c.unlock()
+
+ for _, call := range calls {
+ if call.Parent == nil {
+ panic("not before calls must be created with Mock.On()")
+ }
+ }
+
+ c.requires = append(c.requires, calls...)
+ return c
+}
+
+// Mock is the workhorse used to track activity on another object.
+// For an example of its usage, refer to the "Example Usage" section at the top
+// of this document.
+type Mock struct {
+ // Represents the calls that are expected of
+ // an object.
+ ExpectedCalls []*Call
+
+ // Holds the calls that were made to this mocked object.
+ Calls []Call
+
+	// test is an optional variable that holds the test struct, to be used when an
+ // invalid mock call was made.
+ test TestingT
+
+ // TestData holds any data that might be useful for testing. Testify ignores
+ // this data completely allowing you to do whatever you like with it.
+ testData objx.Map
+
+ mutex sync.Mutex
+}
+
+// String provides a %v format string for Mock.
+// Note: this is used implicitly by Arguments.Diff if a Mock is passed.
+// It exists because go's default %v formatting traverses the struct
+// without acquiring the mutex, which is detected by go test -race.
+func (m *Mock) String() string {
+ return fmt.Sprintf("%[1]T<%[1]p>", m)
+}
+
+// TestData holds any data that might be useful for testing. Testify ignores
+// this data completely allowing you to do whatever you like with it.
+func (m *Mock) TestData() objx.Map {
+ if m.testData == nil {
+ m.testData = make(objx.Map)
+ }
+
+ return m.testData
+}
+
+/*
+ Setting expectations
+*/
+
+// Test sets the test struct variable of the mock object
+func (m *Mock) Test(t TestingT) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ m.test = t
+}
+
+// fail fails the current test with the given formatted format and args.
+// In case that a test was defined, it uses the test APIs for failing a test,
+// otherwise it uses panic.
+func (m *Mock) fail(format string, args ...interface{}) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ if m.test == nil {
+ panic(fmt.Sprintf(format, args...))
+ }
+ m.test.Errorf(format, args...)
+ m.test.FailNow()
+}
+
+// On starts a description of an expectation of the specified method
+// being called.
+//
+// Mock.On("MyMethod", arg1, arg2)
+func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
+ for _, arg := range arguments {
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
+ panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
+ }
+ }
+
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ c := newCall(m, methodName, assert.CallerInfo(), arguments...)
+ m.ExpectedCalls = append(m.ExpectedCalls, c)
+ return c
+}
+
+// /*
+// Recording and responding to activity
+// */
+
+func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
+ var expectedCall *Call
+
+ for i, call := range m.ExpectedCalls {
+ if call.Method == method {
+ _, diffCount := call.Arguments.Diff(arguments)
+ if diffCount == 0 {
+ expectedCall = call
+ if call.Repeatability > -1 {
+ return i, call
+ }
+ }
+ }
+ }
+
+ return -1, expectedCall
+}
+
+type matchCandidate struct {
+ call *Call
+ mismatch string
+ diffCount int
+}
+
+func (c matchCandidate) isBetterMatchThan(other matchCandidate) bool {
+ if c.call == nil {
+ return false
+ }
+ if other.call == nil {
+ return true
+ }
+
+ if c.diffCount > other.diffCount {
+ return false
+ }
+ if c.diffCount < other.diffCount {
+ return true
+ }
+
+ if c.call.Repeatability > 0 && other.call.Repeatability <= 0 {
+ return true
+ }
+ return false
+}
+
+func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) {
+ var bestMatch matchCandidate
+
+ for _, call := range m.expectedCalls() {
+ if call.Method == method {
+
+ errInfo, tempDiffCount := call.Arguments.Diff(arguments)
+ tempCandidate := matchCandidate{
+ call: call,
+ mismatch: errInfo,
+ diffCount: tempDiffCount,
+ }
+ if tempCandidate.isBetterMatchThan(bestMatch) {
+ bestMatch = tempCandidate
+ }
+ }
+ }
+
+ return bestMatch.call, bestMatch.mismatch
+}
+
+func callString(method string, arguments Arguments, includeArgumentValues bool) string {
+ var argValsString string
+ if includeArgumentValues {
+ var argVals []string
+ for argIndex, arg := range arguments {
+ argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg))
+ }
+ argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
+ }
+
+ return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
+}
+
+// Called tells the mock object that a method has been called, and gets an array
+// of arguments to return. Panics if the call is unexpected (i.e. not preceded by
+// appropriate .On .Return() calls)
+// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
+func (m *Mock) Called(arguments ...interface{}) Arguments {
+ // get the calling function's name
+ pc, _, _, ok := runtime.Caller(1)
+ if !ok {
+ panic("Couldn't get the caller information")
+ }
+ functionPath := runtime.FuncForPC(pc).Name()
+ // Next four lines are required to use GCCGO function naming conventions.
+ // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
+ // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
+ // With GCCGO we need to remove interface information starting from pN.
+ re := regexp.MustCompile("\\.pN\\d+_")
+ if re.MatchString(functionPath) {
+ functionPath = re.Split(functionPath, -1)[0]
+ }
+ parts := strings.Split(functionPath, ".")
+ functionName := parts[len(parts)-1]
+ return m.MethodCalled(functionName, arguments...)
+}
+
+// MethodCalled tells the mock object that the given method has been called, and gets
+// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded
+// by appropriate .On .Return() calls)
+// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
+func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments {
+ m.mutex.Lock()
+ // TODO: could combine expected and closes in single loop
+ found, call := m.findExpectedCall(methodName, arguments...)
+
+ if found < 0 {
+ // expected call found but it has already been called with repeatable times
+ if call != nil {
+ m.mutex.Unlock()
+ m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo())
+ }
+ // we have to fail here - because we don't know what to do
+ // as the return arguments. This is because:
+ //
+ // a) this is a totally unexpected call to this method,
+ // b) the arguments are not what was expected, or
+ // c) the developer has forgotten to add an accompanying On...Return pair.
+ closestCall, mismatch := m.findClosestCall(methodName, arguments...)
+ m.mutex.Unlock()
+
+ if closestCall != nil {
+ m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s",
+ callString(methodName, arguments, true),
+ callString(methodName, closestCall.Arguments, true),
+ diffArguments(closestCall.Arguments, arguments),
+ strings.TrimSpace(mismatch),
+ )
+ } else {
+ m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())
+ }
+ }
+
+ for _, requirement := range call.requires {
+ if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied {
+ m.mutex.Unlock()
+ m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s",
+ callString(call.Method, call.Arguments, true),
+ func() (s string) {
+ if requirement.totalCalls > 0 {
+ s = " another call of"
+ }
+ if call.Parent != requirement.Parent {
+ s += " method from another mock instance"
+ }
+ return
+ }(),
+ callString(requirement.Method, requirement.Arguments, true),
+ )
+ }
+ }
+
+ if call.Repeatability == 1 {
+ call.Repeatability = -1
+ } else if call.Repeatability > 1 {
+ call.Repeatability--
+ }
+ call.totalCalls++
+
+ // add the call
+ m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...))
+ m.mutex.Unlock()
+
+ // block if specified
+ if call.WaitFor != nil {
+ <-call.WaitFor
+ } else {
+ time.Sleep(call.waitTime)
+ }
+
+ m.mutex.Lock()
+ panicMsg := call.PanicMsg
+ m.mutex.Unlock()
+ if panicMsg != nil {
+ panic(*panicMsg)
+ }
+
+ m.mutex.Lock()
+ runFn := call.RunFn
+ m.mutex.Unlock()
+
+ if runFn != nil {
+ runFn(arguments)
+ }
+
+ m.mutex.Lock()
+ returnArgs := call.ReturnArguments
+ m.mutex.Unlock()
+
+ return returnArgs
+}
+
+/*
+ Assertions
+*/
+
+type assertExpectationser interface {
+ AssertExpectations(TestingT) bool
+}
+
+// AssertExpectationsForObjects asserts that everything specified with On and Return
+// of the specified objects was in fact called as expected.
+//
+// Calls may have occurred in any order.
+func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ for _, obj := range testObjects {
+ if m, ok := obj.(*Mock); ok {
+ t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)")
+ obj = m
+ }
+ m := obj.(assertExpectationser)
+ if !m.AssertExpectations(t) {
+ t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m))
+ return false
+ }
+ }
+ return true
+}
+
+// AssertExpectations asserts that everything specified with On and Return was
+// in fact called as expected. Calls may have occurred in any order.
+func (m *Mock) AssertExpectations(t TestingT) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ var failedExpectations int
+
+ // iterate through each expectation
+ expectedCalls := m.expectedCalls()
+ for _, expectedCall := range expectedCalls {
+ satisfied, reason := m.checkExpectation(expectedCall)
+ if !satisfied {
+ failedExpectations++
+ }
+ t.Logf(reason)
+ }
+
+ if failedExpectations != 0 {
+ t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo())
+ }
+
+ return failedExpectations == 0
+}
+
+func (m *Mock) checkExpectation(call *Call) (bool, string) {
+ if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 {
+ return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo)
+ }
+ if call.Repeatability > 0 {
+ return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo)
+ }
+ return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String())
+}
+
+// AssertNumberOfCalls asserts that the method was called expectedCalls times.
+func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ var actualCalls int
+ for _, call := range m.calls() {
+ if call.Method == methodName {
+ actualCalls++
+ }
+ }
+ return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
+}
+
+// AssertCalled asserts that the method was called.
+// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
+func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if !m.methodWasCalled(methodName, arguments) {
+ var calledWithArgs []string
+ for _, call := range m.calls() {
+ calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments))
+ }
+ if len(calledWithArgs) == 0 {
+ return assert.Fail(t, "Should have called with given arguments",
+ fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments))
+ }
+ return assert.Fail(t, "Should have called with given arguments",
+ fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n")))
+ }
+ return true
+}
+
+// AssertNotCalled asserts that the method was not called.
+// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if m.methodWasCalled(methodName, arguments) {
+ return assert.Fail(t, "Should not have called with given arguments",
+ fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments))
+ }
+ return true
+}
+
+// IsMethodCallable checking that the method can be called
+// If the method was called more than `Repeatability` return false
+func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ for _, v := range m.ExpectedCalls {
+ if v.Method != methodName {
+ continue
+ }
+ if len(arguments) != len(v.Arguments) {
+ continue
+ }
+ if v.Repeatability < v.totalCalls {
+ continue
+ }
+ if isArgsEqual(v.Arguments, arguments) {
+ return true
+ }
+ }
+ return false
+}
+
+// isArgsEqual compares arguments
+func isArgsEqual(expected Arguments, args []interface{}) bool {
+ if len(expected) != len(args) {
+ return false
+ }
+ for i, v := range args {
+ if !reflect.DeepEqual(expected[i], v) {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
+ for _, call := range m.calls() {
+ if call.Method == methodName {
+
+ _, differences := Arguments(expected).Diff(call.Arguments)
+
+ if differences == 0 {
+ // found the expected call
+ return true
+ }
+
+ }
+ }
+ // we didn't find the expected call
+ return false
+}
+
+func (m *Mock) expectedCalls() []*Call {
+ return append([]*Call{}, m.ExpectedCalls...)
+}
+
+func (m *Mock) calls() []Call {
+ return append([]Call{}, m.Calls...)
+}
+
+/*
+ Arguments
+*/
+
+// Arguments holds an array of method arguments or return values.
+type Arguments []interface{}
+
+const (
+ // Anything is used in Diff and Assert when the argument being tested
+ // shouldn't be taken into consideration.
+ Anything = "mock.Anything"
+)
+
+// AnythingOfTypeArgument is a string that contains the type of an argument
+// for use when type checking. Used in Diff and Assert.
+type AnythingOfTypeArgument string
+
+// AnythingOfType returns an AnythingOfTypeArgument object containing the
+// name of the type to check for. Used in Diff and Assert.
+//
+// For example:
+// Assert(t, AnythingOfType("string"), AnythingOfType("int"))
+func AnythingOfType(t string) AnythingOfTypeArgument {
+ return AnythingOfTypeArgument(t)
+}
+
+// IsTypeArgument is a struct that contains the type of an argument
+// for use when type checking. This is an alternative to AnythingOfType.
+// Used in Diff and Assert.
+type IsTypeArgument struct {
+ t interface{}
+}
+
+// IsType returns an IsTypeArgument object containing the type to check for.
+// You can provide a zero-value of the type to check. This is an
+// alternative to AnythingOfType. Used in Diff and Assert.
+//
+// For example:
+// Assert(t, IsType(""), IsType(0))
+func IsType(t interface{}) *IsTypeArgument {
+ return &IsTypeArgument{t: t}
+}
+
+// argumentMatcher performs custom argument matching, returning whether or
+// not the argument is matched by the expectation fixture function.
+type argumentMatcher struct {
+ // fn is a function which accepts one argument, and returns a bool.
+ fn reflect.Value
+}
+
+func (f argumentMatcher) Matches(argument interface{}) bool {
+ expectType := f.fn.Type().In(0)
+ expectTypeNilSupported := false
+ switch expectType.Kind() {
+ case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr:
+ expectTypeNilSupported = true
+ }
+
+ argType := reflect.TypeOf(argument)
+ var arg reflect.Value
+ if argType == nil {
+ arg = reflect.New(expectType).Elem()
+ } else {
+ arg = reflect.ValueOf(argument)
+ }
+
+ if argType == nil && !expectTypeNilSupported {
+ panic(errors.New("attempting to call matcher with nil for non-nil expected type"))
+ }
+ if argType == nil || argType.AssignableTo(expectType) {
+ result := f.fn.Call([]reflect.Value{arg})
+ return result[0].Bool()
+ }
+ return false
+}
+
+func (f argumentMatcher) String() string {
+ return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String())
+}
+
+// MatchedBy can be used to match a mock call based on only certain properties
+// from a complex struct or some calculation. It takes a function that will be
+// evaluated with the called argument and will return true when there's a match
+// and false otherwise.
+//
+// Example:
+// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" }))
+//
+// |fn|, must be a function accepting a single argument (of the expected type)
+// which returns a bool. If |fn| doesn't match the required signature,
+// MatchedBy() panics.
+func MatchedBy(fn interface{}) argumentMatcher {
+ fnType := reflect.TypeOf(fn)
+
+ if fnType.Kind() != reflect.Func {
+ panic(fmt.Sprintf("assert: arguments: %s is not a func", fn))
+ }
+ if fnType.NumIn() != 1 {
+ panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn))
+ }
+ if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool {
+ panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn))
+ }
+
+ return argumentMatcher{fn: reflect.ValueOf(fn)}
+}
+
+// Get returns the argument at the specified index.
+func (args Arguments) Get(index int) interface{} {
+ if index+1 > len(args) {
+ panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
+ }
+ return args[index]
+}
+
+// Is gets whether the objects match the arguments specified.
+func (args Arguments) Is(objects ...interface{}) bool {
+ for i, obj := range args {
+ if obj != objects[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Diff gets a string describing the differences between the arguments
+// and the specified objects.
+//
+// Returns the diff string and number of differences found.
+func (args Arguments) Diff(objects []interface{}) (string, int) {
+ // TODO: could return string as error and nil for No difference
+
+ output := "\n"
+ var differences int
+
+ maxArgCount := len(args)
+ if len(objects) > maxArgCount {
+ maxArgCount = len(objects)
+ }
+
+ for i := 0; i < maxArgCount; i++ {
+ var actual, expected interface{}
+ var actualFmt, expectedFmt string
+
+ if len(objects) <= i {
+ actual = "(Missing)"
+ actualFmt = "(Missing)"
+ } else {
+ actual = objects[i]
+ actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual)
+ }
+
+ if len(args) <= i {
+ expected = "(Missing)"
+ expectedFmt = "(Missing)"
+ } else {
+ expected = args[i]
+ expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected)
+ }
+
+ if matcher, ok := expected.(argumentMatcher); ok {
+ var matches bool
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ actualFmt = fmt.Sprintf("panic in argument matcher: %v", r)
+ }
+ }()
+ matches = matcher.Matches(actual)
+ }()
+ if matches {
+ output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher)
+ } else {
+ differences++
+ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher)
+ }
+ } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
+ // type checking
+ if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
+ // not match
+ differences++
+ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt)
+ }
+ } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) {
+ t := expected.(*IsTypeArgument).t
+ if reflect.TypeOf(t) != reflect.TypeOf(actual) {
+ differences++
+ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt)
+ }
+ } else {
+ // normal checking
+
+ if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
+ // match
+ output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt)
+ } else {
+ // not match
+ differences++
+ output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt)
+ }
+ }
+
+ }
+
+ if differences == 0 {
+ return "No differences.", differences
+ }
+
+ return output, differences
+}
+
+// Assert compares the arguments with the specified objects and fails if
+// they do not exactly match.
+func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ // get the differences
+ diff, diffCount := args.Diff(objects)
+
+ if diffCount == 0 {
+ return true
+ }
+
+ // there are differences... report them...
+ t.Logf(diff)
+ t.Errorf("%sArguments do not match.", assert.CallerInfo())
+
+ return false
+}
+
+// String gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+//
+// If no index is provided, String() returns a complete string representation
+// of the arguments.
+func (args Arguments) String(indexOrNil ...int) string {
+ if len(indexOrNil) == 0 {
+ // normal String() method - return a string representation of the args
+ var argsStr []string
+ for _, arg := range args {
+ argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely
+ }
+ return strings.Join(argsStr, ",")
+ } else if len(indexOrNil) == 1 {
+ // Index has been specified - get the argument at that index
+ index := indexOrNil[0]
+ var s string
+ var ok bool
+ if s, ok = args.Get(index).(string); !ok {
+ panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
+ }
+ return s
+ }
+
+ panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil)))
+}
+
+// Int gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Int(index int) int {
+ var s int
+ var ok bool
+ if s, ok = args.Get(index).(int); !ok {
+ panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
+
+// Error gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Error(index int) error {
+ obj := args.Get(index)
+ var s error
+ var ok bool
+ if obj == nil {
+ return nil
+ }
+ if s, ok = obj.(error); !ok {
+ panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
+
+// Bool gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Bool(index int) bool {
+ var s bool
+ var ok bool
+ if s, ok = args.Get(index).(bool); !ok {
+ panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+func diffArguments(expected Arguments, actual Arguments) string {
+ if len(expected) != len(actual) {
+ return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual))
+ }
+
+ for x := range expected {
+ if diffString := diff(expected[x], actual[x]); diffString != "" {
+ return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString)
+ }
+ }
+
+ return ""
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice or array. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {
+ return ""
+ }
+
+ e := spewConfig.Sdump(expected)
+ a := spewConfig.Sdump(actual)
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return diff
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
+
+type tHelper interface {
+ Helper()
+}
diff --git a/test/integration/vendor/github.com/subosito/gotenv/.env b/test/integration/vendor/github.com/subosito/gotenv/.env
new file mode 100644
index 000000000..6405eca71
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/.env
@@ -0,0 +1 @@
+HELLO=world
diff --git a/test/integration/vendor/github.com/subosito/gotenv/.env.invalid b/test/integration/vendor/github.com/subosito/gotenv/.env.invalid
new file mode 100644
index 000000000..016d5e0ce
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/.env.invalid
@@ -0,0 +1 @@
+lol$wut
diff --git a/test/integration/vendor/github.com/subosito/gotenv/.gitignore b/test/integration/vendor/github.com/subosito/gotenv/.gitignore
new file mode 100644
index 000000000..7db37c1db
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/.gitignore
@@ -0,0 +1,4 @@
+*.test
+*.out
+annotate.json
+profile.cov
diff --git a/test/integration/vendor/github.com/subosito/gotenv/.golangci.yaml b/test/integration/vendor/github.com/subosito/gotenv/.golangci.yaml
new file mode 100644
index 000000000..8c82a762e
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/.golangci.yaml
@@ -0,0 +1,7 @@
+# Options for analysis running.
+run:
+ timeout: 1m
+
+linters-settings:
+ gofmt:
+ simplify: true
diff --git a/test/integration/vendor/github.com/subosito/gotenv/CHANGELOG.md b/test/integration/vendor/github.com/subosito/gotenv/CHANGELOG.md
new file mode 100644
index 000000000..757caad26
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/CHANGELOG.md
@@ -0,0 +1,68 @@
+# Changelog
+
+## [1.4.0] - 2022-06-02
+
+### Added
+
+- Add `Marshal` and `Unmarshal` helpers
+
+### Changed
+
+- The CI will now run a linter and the tests on PRs.
+
+## [1.3.0] - 2022-05-23
+
+### Added
+
+- Support = within double-quoted strings
+- Add support for multiline values
+
+### Changed
+
+- `OverLoad` prefer environment variables over local variables
+
+## [1.2.0] - 2019-08-03
+
+### Added
+
+- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`.
+- Add more tests to be 100% coverage.
+- Add CHANGELOG
+- Add more OS for the test: OSX and Windows
+
+### Changed
+
+- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv).
+- Updated README with mentions to all available functions
+
+### Removed
+
+- Remove `ErrFormat`
+- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper.
+
+## [1.1.1] - 2018-06-05
+
+### Changed
+
+- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding)
+
+## [1.1.0] - 2017-03-20
+
+### Added
+
+- Supports carriage return in env
+- Handle files with UTF-8 BOM
+
+### Changed
+
+- Whitespace handling
+
+### Fixed
+
+- Incorrect variable expansion
+- Handling escaped '$' characters
+
+## [1.0.0] - 2014-10-05
+
+First stable release.
+
diff --git a/test/integration/vendor/github.com/subosito/gotenv/LICENSE b/test/integration/vendor/github.com/subosito/gotenv/LICENSE
new file mode 100644
index 000000000..f64ccaedc
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Alif Rachmawadi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/test/integration/vendor/github.com/subosito/gotenv/README.md b/test/integration/vendor/github.com/subosito/gotenv/README.md
new file mode 100644
index 000000000..fc9616e3b
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/README.md
@@ -0,0 +1,129 @@
+# gotenv
+
+[](https://github.com/subosito/gotenv/actions)
+[](https://codecov.io/gh/subosito/gotenv)
+[](https://goreportcard.com/report/github.com/subosito/gotenv)
+[](https://godoc.org/github.com/subosito/gotenv)
+
+Load environment variables from `.env` or `io.Reader` in Go.
+
+## Usage
+
+Put the gotenv package on your `import` statement:
+
+```go
+import "github.com/subosito/gotenv"
+```
+
+To modify your app environment variables, `gotenv` exposes two main functions:
+
+- `gotenv.Load`
+- `gotenv.Apply`
+
+By default, `gotenv.Load` will look for a file called `.env` in the current working directory.
+
+Behind the scene, it will then load `.env` file and export the valid variables to the environment variables. Make sure you call the method as soon as possible to ensure it loads all variables, say, put it on `init()` function.
+
+Once loaded you can use `os.Getenv()` to get the value of the variable.
+
+Let's say you have `.env` file:
+
+```sh
+APP_ID=1234567
+APP_SECRET=abcdef
+```
+
+Here's the example of your app:
+
+```go
+package main
+
+import (
+ "github.com/subosito/gotenv"
+ "log"
+ "os"
+)
+
+func init() {
+ gotenv.Load()
+}
+
+func main() {
+ log.Println(os.Getenv("APP_ID")) // "1234567"
+ log.Println(os.Getenv("APP_SECRET")) // "abcdef"
+}
+```
+
+You can also load other than `.env` file if you wish. Just supply filenames when calling `Load()`. It will load them in order and the first value set for a variable will win.:
+
+```go
+gotenv.Load(".env.production", "credentials")
+```
+
+While `gotenv.Load` loads entries from `.env` file, `gotenv.Apply` allows you to use any `io.Reader`:
+
+```go
+gotenv.Apply(strings.NewReader("APP_ID=1234567"))
+
+log.Println(os.Getenv("APP_ID"))
+// Output: "1234567"
+```
+
+Both `gotenv.Load` and `gotenv.Apply` **DO NOT** override existing environment variables. If you want to override existing ones, see the section below.
+
+### Environment Overrides
+
+Besides the functions above, `gotenv` also provides functions that override existing variables:
+
+- `gotenv.OverLoad`
+- `gotenv.OverApply`
+
+Here's the example of this overrides behavior:
+
+```go
+os.Setenv("HELLO", "world")
+
+// NOTE: when using Apply, the existing value is preserved
+gotenv.Apply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "world"
+
+// NOTE: using OverApply existing value will be overridden
+gotenv.OverApply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "universe"
+```
+
+### Throw a Panic
+
+Both `gotenv.Load` and `gotenv.OverLoad` return an error when something goes wrong, for example when your env file does not exist. To make it easier to use, `gotenv` also provides a `gotenv.Must` helper that panics when an error is returned.
+
+```go
+err := gotenv.Load(".env-is-not-exist")
+fmt.Println("error", err)
+// error: open .env-is-not-exist: no such file or directory
+
+gotenv.Must(gotenv.Load, ".env-is-not-exist")
+// it will throw a panic
+// panic: open .env-is-not-exist: no such file or directory
+```
+
+### Another Scenario
+
+Just in case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` function as public API so you can use that.
+
+```go
+// import "strings"
+
+pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
+// gotenv.Env{"FOO": "test", "BAR": "test"}
+
+pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
+// gotenv.Env{"FOO": "bar"}
+```
+
+`Parse` ignores invalid lines and returns `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines.
+
+## Notes
+
+The gotenv package is a Go port of [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be compatible as close as possible.
diff --git a/test/integration/vendor/github.com/subosito/gotenv/gotenv.go b/test/integration/vendor/github.com/subosito/gotenv/gotenv.go
new file mode 100644
index 000000000..7b1186e1f
--- /dev/null
+++ b/test/integration/vendor/github.com/subosito/gotenv/gotenv.go
@@ -0,0 +1,369 @@
+// Package gotenv provides functionality to dynamically load the environment variables
+package gotenv
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+const (
+ // Pattern for detecting valid line format
+ linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z`
+
+ // Pattern for detecting valid variable within a value
+ variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
+
+ // Byte order mark character
+ bom = "\xef\xbb\xbf"
+)
+
+// Env holds key/value pair of valid environment variable
+type Env map[string]string
+
+// Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist.
+// When it's called with no argument, it will load `.env` file on the current path and set the environment variables.
+// Otherwise, it will loop over the filenames parameter and set the proper environment variables.
+func Load(filenames ...string) error {
+	// override=false: variables already present in the environment win.
+	return loadenv(false, filenames...)
+}
+
+// OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables.
+func OverLoad(filenames ...string) error {
+	// override=true: file values replace existing environment variables.
+	return loadenv(true, filenames...)
+}
+
+// Must is wrapper function that will panic when supplied function returns an error.
+func Must(fn func(filenames ...string) error, filenames ...string) {
+	if err := fn(filenames...); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist.
+func Apply(r io.Reader) error {
+	return parset(r, false)
+}
+
+// OverApply is a function to load an io Reader then export and override the valid variables into environment variables.
+func OverApply(r io.Reader) error {
+	return parset(r, true)
+}
+
+// loadenv opens each named file (defaulting to ".env") and applies its
+// variables to the process environment via parset. It stops at the
+// first file that cannot be opened or parsed.
+func loadenv(override bool, filenames ...string) error {
+	if len(filenames) == 0 {
+		filenames = []string{".env"}
+	}
+
+	for _, filename := range filenames {
+		f, err := os.Open(filename)
+		if err != nil {
+			return err
+		}
+
+		// Close before checking the parse error so the file handle is
+		// released even on failure (no defer inside the loop).
+		err = parset(f, override)
+		f.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parset ("parse and set") parses r and exports every resulting
+// key/value pair into the process environment. With override=false,
+// variables already present in the environment are left untouched.
+func parset(r io.Reader, override bool) error {
+	env, err := strictParse(r, override)
+	if err != nil {
+		return err
+	}
+
+	for key, val := range env {
+		setenv(key, val, override)
+	}
+
+	return nil
+}
+
+// setenv sets key=val in the process environment. Unless override is
+// true, an already-present variable is preserved (LookupEnv is used so
+// that an empty-but-set variable also counts as present).
+func setenv(key, val string, override bool) {
+	if override {
+		os.Setenv(key, val)
+	} else {
+		if _, present := os.LookupEnv(key); !present {
+			os.Setenv(key, val)
+		}
+	}
+}
+
+// Parse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables.
+// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
+// This function is skipping any invalid lines and only processing the valid one.
+func Parse(r io.Reader) Env {
+	// The error is deliberately discarded: invalid lines are skipped.
+	env, _ := strictParse(r, false)
+	return env
+}
+
+// StrictParse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables.
+// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
+// This function is returning an error if there are any invalid lines.
+func StrictParse(r io.Reader) (Env, error) {
+	return strictParse(r, false)
+}
+
+// Read is a function to parse a file line by line and returns the valid Env key/value pair of valid variables.
+// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
+// This function is skipping any invalid lines and only processing the valid one.
+func Read(filename string) (Env, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return strictParse(f, false)
+}
+
+// Unmarshal reads a string line by line and returns the valid Env key/value pair of valid variables.
+// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
+// This function is returning an error if there are any invalid lines.
+func Unmarshal(str string) (Env, error) {
+	return strictParse(strings.NewReader(str), false)
+}
+
+// Marshal outputs the given environment as a env file.
+// Variables will be sorted by name.
+func Marshal(env Env) (string, error) {
+	lines := make([]string, 0, len(env))
+	for k, v := range env {
+		// Integers are emitted unquoted; everything else is quoted via %q.
+		if d, err := strconv.Atoi(v); err == nil {
+			lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
+		} else {
+			lines = append(lines, fmt.Sprintf(`%s=%q`, k, v))
+		}
+	}
+	// Map iteration order is random; sorting makes output deterministic.
+	sort.Strings(lines)
+	return strings.Join(lines, "\n"), nil
+}
+
+// Write serializes the given environment and writes it to a file.
+// Parent directories are created as needed and an existing file is
+// truncated. The file is fsynced before returning.
+func Write(env Env, filename string) error {
+	content, err := Marshal(env)
+	if err != nil {
+		return err
+	}
+	// ensure the path exists
+	if err := os.MkdirAll(filepath.Dir(filename), 0o775); err != nil {
+		return err
+	}
+	// create or truncate the file
+	file, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = file.WriteString(content + "\n")
+	if err != nil {
+		return err
+	}
+
+	// Flush to stable storage before returning success.
+	return file.Sync()
+}
+
+// strictParse scans r line by line, accumulating valid key/value pairs
+// into the returned Env. Quoted values that span multiple lines are
+// stitched back together before being handed to parseLine. The override
+// flag is forwarded to variable expansion (see varReplacement).
+func strictParse(r io.Reader, override bool) (Env, error) {
+	env := make(Env)
+	scanner := bufio.NewScanner(r)
+
+	firstLine := true
+
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+
+		// Strip a UTF-8 byte order mark from the first line only.
+		if firstLine {
+			line = strings.TrimPrefix(line, bom)
+			firstLine = false
+		}
+
+		// Skip blanks and comment lines.
+		if line == "" || line[0] == '#' {
+			continue
+		}
+
+		quote := ""
+		// look for the delimiter character
+		idx := strings.Index(line, "=")
+		if idx == -1 {
+			idx = strings.Index(line, ":")
+		}
+		// look for a quote character
+		if idx > 0 && idx < len(line)-1 {
+			val := strings.TrimSpace(line[idx+1:])
+			if val[0] == '"' || val[0] == '\'' {
+				quote = val[:1]
+				// look for the closing quote character within the same line
+				idx = strings.LastIndex(strings.TrimSpace(val[1:]), quote)
+				if idx >= 0 && val[idx] != '\\' {
+					quote = ""
+				}
+			}
+		}
+		// look for the closing quote character on subsequent lines,
+		// appending each one to the logical line
+		for quote != "" && scanner.Scan() {
+			l := scanner.Text()
+			line += "\n" + l
+			idx := strings.LastIndex(l, quote)
+			if idx > 0 && l[idx-1] == '\\' {
+				// found a matching quote character but it's escaped
+				continue
+			}
+			if idx >= 0 {
+				// found a matching quote
+				quote = ""
+			}
+		}
+
+		// EOF reached while still inside a quoted value.
+		if quote != "" {
+			return env, fmt.Errorf("missing quotes")
+		}
+
+		err := parseLine(line, env, override)
+		if err != nil {
+			return env, err
+		}
+	}
+
+	// NOTE(review): scanner.Err() is never checked, so read errors from r
+	// are silently treated as EOF — confirm against upstream before changing.
+	return env, nil
+}
+
+var (
+	// lineRgx matches one "KEY=value" (or "KEY: value") assignment,
+	// optionally prefixed with "export" and suffixed with a comment.
+	lineRgx = regexp.MustCompile(linePattern)
+	// unescapeRgx strips a backslash before any character except $.
+	unescapeRgx = regexp.MustCompile(`\\([^$])`)
+	// varRgx finds $VAR / ${VAR} references (possibly escaped).
+	varRgx = regexp.MustCompile(variablePattern)
+)
+
+// parseLine parses a single logical line into env. Lines that do not
+// match lineRgx are delegated to checkFormat, which decides whether
+// they are ignorable or a format error.
+func parseLine(s string, env Env, override bool) error {
+	rm := lineRgx.FindStringSubmatch(s)
+
+	if len(rm) == 0 {
+		return checkFormat(s, env)
+	}
+
+	key := strings.TrimSpace(rm[1])
+	val := strings.TrimSpace(rm[2])
+
+	var hsq, hdq bool
+
+	// check if the value is quoted
+	if l := len(val); l >= 2 {
+		l -= 1
+		// has double quotes
+		hdq = val[0] == '"' && val[l] == '"'
+		// has single quotes
+		hsq = val[0] == '\'' && val[l] == '\''
+
+		// remove quotes '' or ""
+		if hsq || hdq {
+			val = val[1:l]
+		}
+	}
+
+	// Double-quoted values get escape-sequence processing.
+	if hdq {
+		val = strings.ReplaceAll(val, `\n`, "\n")
+		val = strings.ReplaceAll(val, `\r`, "\r")
+
+		// Unescape all characters except $ so variables can be escaped properly
+		val = unescapeRgx.ReplaceAllString(val, "$1")
+	}
+
+	// Single-quoted values are literal; anything else is expanded.
+	if !hsq {
+		fv := func(s string) string {
+			return varReplacement(s, hsq, env, override)
+		}
+		val = varRgx.ReplaceAllStringFunc(val, fv)
+		val = parseVal(val, env, hdq, override)
+	}
+
+	env[key] = val
+	return nil
+}
+
+// parseExport validates a bare "export NAME" line (one that carried no
+// assignment): it is only acceptable when NAME was already defined
+// earlier in the same input, otherwise it is reported as an error.
+func parseExport(st string, env Env) error {
+	if strings.HasPrefix(st, "export") {
+		vs := strings.SplitN(st, " ", 2)
+
+		if len(vs) > 1 {
+			if _, ok := env[vs[1]]; !ok {
+				return fmt.Errorf("line `%s` has an unset variable", st)
+			}
+		}
+	}
+
+	return nil
+}
+
+// varNameRgx extracts the variable name from a $VAR / ${VAR} match.
+var varNameRgx = regexp.MustCompile(`(\$)(\{?([A-Z0-9_]+)\}?)`)
+
+// varReplacement resolves a single $VAR / ${VAR} occurrence found by
+// varRgx. Escaped dollars are unescaped, single-quoted values (hsq) are
+// returned verbatim, and lookup order depends on override: with
+// override=false the process environment wins over values parsed so far.
+func varReplacement(s string, hsq bool, env Env, override bool) string {
+	if s == "" {
+		return s
+	}
+
+	if s[0] == '\\' {
+		// the dollar sign is escaped
+		return s[1:]
+	}
+
+	// Single-quoted values are never expanded.
+	if hsq {
+		return s
+	}
+
+	mn := varNameRgx.FindStringSubmatch(s)
+
+	if len(mn) == 0 {
+		return s
+	}
+
+	v := mn[3]
+
+	// Prefer the real environment unless we are overriding.
+	if replace, ok := os.LookupEnv(v); ok && !override {
+		return replace
+	}
+
+	// Fall back to values parsed earlier in this file.
+	if replace, ok := env[v]; ok {
+		return replace
+	}
+
+	// Undefined variables expand to "" (os.Getenv of an unset name).
+	return os.Getenv(v)
+}
+
+// checkFormat classifies a line that failed lineRgx: blanks and
+// comments are fine, bare "export NAME" lines are validated via
+// parseExport, and anything else is a format error.
+func checkFormat(s string, env Env) error {
+	st := strings.TrimSpace(s)
+
+	if st == "" || st[0] == '#' {
+		return nil
+	}
+
+	if err := parseExport(st, env); err != nil {
+		return err
+	}
+
+	return fmt.Errorf("line `%s` doesn't match format", s)
+}
+
+// parseVal handles a value that itself contains further assignments
+// separated by carriage returns (a quirk of CR-delimited input): the
+// first segment becomes the value and the remaining segments are parsed
+// as additional lines. Double-quoted values (ignoreNewlines) are left
+// untouched.
+func parseVal(val string, env Env, ignoreNewlines bool, override bool) string {
+	if strings.Contains(val, "=") && !ignoreNewlines {
+		kv := strings.Split(val, "\r")
+
+		if len(kv) > 1 {
+			val = kv[0]
+			// Parse errors in trailing segments are intentionally ignored.
+			for _, l := range kv[1:] {
+				_ = parseLine(l, env, override)
+			}
+		}
+	}
+
+	return val
+}
diff --git a/test/integration/vendor/github.com/vardius/message-bus/.gitignore b/test/integration/vendor/github.com/vardius/message-bus/.gitignore
new file mode 100644
index 000000000..0c965afb8
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/.gitignore
@@ -0,0 +1,18 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Editor
+.vscode/
+.idea/
diff --git a/test/integration/vendor/github.com/vardius/message-bus/.hound.yml b/test/integration/vendor/github.com/vardius/message-bus/.hound.yml
new file mode 100644
index 000000000..e5c719dd2
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/.hound.yml
@@ -0,0 +1,2 @@
+go:
+ enabled: true
diff --git a/test/integration/vendor/github.com/vardius/message-bus/.travis.yml b/test/integration/vendor/github.com/vardius/message-bus/.travis.yml
new file mode 100644
index 000000000..49ca6bbc6
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - "1.12"
+ - tip
+script:
+ - go build
+ - go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic
+ - go test -bench=. -cpu=4 -benchmem
+after_script:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/test/integration/vendor/github.com/vardius/message-bus/LICENSE.md b/test/integration/vendor/github.com/vardius/message-bus/LICENSE.md
new file mode 100644
index 000000000..55238c342
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-present Rafał Lorenz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/test/integration/vendor/github.com/vardius/message-bus/README.md b/test/integration/vendor/github.com/vardius/message-bus/README.md
new file mode 100644
index 000000000..7ec0120b7
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/README.md
@@ -0,0 +1,61 @@
+🚌 message-bus
+================
+[](https://travis-ci.org/vardius/message-bus)
+[](https://goreportcard.com/report/github.com/vardius/message-bus)
+[](https://codecov.io/gh/vardius/message-bus)
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fvardius%2Fmessage-bus?ref=badge_shield)
+[](https://pkg.go.dev/github.com/vardius/message-bus)
+[](https://github.com/vardius/message-bus/blob/master/LICENSE.md)
+
+
+
+Go simple async message bus.
+
+📖 ABOUT
+==================================================
+Contributors:
+
+* [Rafał Lorenz](http://rafallorenz.com)
+
+Want to contribute ? Feel free to send pull requests!
+
+Have problems, bugs, feature ideas?
+We are using the github [issue tracker](https://github.com/vardius/message-bus/issues) to manage them.
+
+## 📚 Documentation
+
+For **documentation** (_including examples_), **visit [rafallorenz.com/message-bus](http://rafallorenz.com/message-bus)**
+
+For **GoDoc** reference, **visit [pkg.go.dev](https://pkg.go.dev/github.com/vardius/message-bus)**
+
+🚏 HOW TO USE
+==================================================
+
+## 🚅 Benchmark
+
+```bash
+➜ message-bus git:(master) ✗ go test -bench=. -cpu=4 -benchmem
+goos: darwin
+goarch: amd64
+pkg: github.com/vardius/message-bus
+BenchmarkPublish-4 4430224 250 ns/op 0 B/op 0 allocs/op
+BenchmarkSubscribe-4 598240 2037 ns/op 735 B/op 5 allocs/op
+```
+
+👉 **[Click here](https://rafallorenz.com/message-bus/docs/benchmark)** to see all benchmark results.
+
+## Features
+- [Documentation](https://rafallorenz.com/message-bus/)
+
+🚏 HOW TO USE
+==================================================
+
+- [Basic example](https://rafallorenz.com/message-bus/docs/basic-example)
+- [Pub/Sub](https://rafallorenz.com/message-bus/docs/pubsub)
+
+📜 [License](LICENSE.md)
+-------
+
+This package is released under the MIT license. See the complete license in the package:
+
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fvardius%2Fmessage-bus?ref=badge_large)
diff --git a/test/integration/vendor/github.com/vardius/message-bus/bus.go b/test/integration/vendor/github.com/vardius/message-bus/bus.go
new file mode 100644
index 000000000..47b3175c1
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/bus.go
@@ -0,0 +1,145 @@
+package messagebus
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// MessageBus implements publish/subscribe messaging paradigm
+type MessageBus interface {
+	// Publish publishes arguments to the given topic subscribers
+	// Publish block only when the buffer of one of the subscribers is full.
+	Publish(topic string, args ...interface{})
+	// Close unsubscribe all handlers from given topic
+	Close(topic string)
+	// Subscribe subscribes to the given topic
+	Subscribe(topic string, fn interface{}) error
+	// Unsubscribe unsubscribe handler from the given topic
+	Unsubscribe(topic string, fn interface{}) error
+}
+
+// handlersMap maps a topic name to its subscribed handlers.
+type handlersMap map[string][]*handler
+
+// handler pairs a subscriber callback with the buffered queue its
+// dedicated goroutine consumes (see Subscribe).
+type handler struct {
+	callback reflect.Value
+	queue    chan []reflect.Value
+}
+
+// messageBus is the default MessageBus implementation; handlers is
+// guarded by mtx (RLock on Publish, Lock on mutation).
+type messageBus struct {
+	handlerQueueSize int
+	mtx              sync.RWMutex
+	handlers         handlersMap
+}
+
+// Publish delivers args to every subscriber of topic by pushing the
+// reflected argument list onto each handler's queue. It blocks only
+// when a subscriber's buffered queue is full.
+func (b *messageBus) Publish(topic string, args ...interface{}) {
+	rArgs := buildHandlerArgs(args)
+
+	// Read lock is enough: handlers is only mutated by
+	// Subscribe/Unsubscribe/Close, which take the write lock.
+	b.mtx.RLock()
+	defer b.mtx.RUnlock()
+
+	if hs, ok := b.handlers[topic]; ok {
+		for _, h := range hs {
+			h.queue <- rArgs
+		}
+	}
+}
+
+// Subscribe registers fn (which must be a func) as a handler for topic
+// and starts one goroutine that invokes fn for each published message.
+// The goroutine exits when the handler's queue is closed by
+// Unsubscribe or Close.
+func (b *messageBus) Subscribe(topic string, fn interface{}) error {
+	if err := isValidHandler(fn); err != nil {
+		return err
+	}
+
+	h := &handler{
+		callback: reflect.ValueOf(fn),
+		queue:    make(chan []reflect.Value, b.handlerQueueSize),
+	}
+
+	// Consumer goroutine: drains the queue until it is closed.
+	go func() {
+		for args := range h.queue {
+			h.callback.Call(args)
+		}
+	}()
+
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	b.handlers[topic] = append(b.handlers[topic], h)
+
+	return nil
+}
+
+// Unsubscribe removes fn's handler(s) from topic, closing each removed
+// handler's queue so its consumer goroutine terminates. Returns an
+// error when the topic has no subscribers at all.
+func (b *messageBus) Unsubscribe(topic string, fn interface{}) error {
+	if err := isValidHandler(fn); err != nil {
+		return err
+	}
+
+	rv := reflect.ValueOf(fn)
+
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	if _, ok := b.handlers[topic]; ok {
+		// NOTE(review): the slice is mutated while being ranged over;
+		// upstream relies on this working for the matched element —
+		// verify before restructuring.
+		for i, h := range b.handlers[topic] {
+			if h.callback == rv {
+				close(h.queue)
+
+				if len(b.handlers[topic]) == 1 {
+					delete(b.handlers, topic)
+				} else {
+					b.handlers[topic] = append(b.handlers[topic][:i], b.handlers[topic][i+1:]...)
+				}
+			}
+		}
+
+		return nil
+	}
+
+	return fmt.Errorf("topic %s doesn't exist", topic)
+}
+
+// Close unsubscribes every handler from topic, closing each queue so
+// all consumer goroutines for the topic terminate. Closing an unknown
+// topic is a no-op.
+func (b *messageBus) Close(topic string) {
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	if _, ok := b.handlers[topic]; ok {
+		for _, h := range b.handlers[topic] {
+			close(h.queue)
+		}
+
+		delete(b.handlers, topic)
+
+		return
+	}
+}
+
+// isValidHandler reports an error unless fn is a function value.
+func isValidHandler(fn interface{}) error {
+	if reflect.TypeOf(fn).Kind() != reflect.Func {
+		return fmt.Errorf("%s is not a reflect.Func", reflect.TypeOf(fn))
+	}
+
+	return nil
+}
+
+// buildHandlerArgs converts the published arguments into the
+// []reflect.Value form expected by reflect.Value.Call.
+func buildHandlerArgs(args []interface{}) []reflect.Value {
+	reflectedArgs := make([]reflect.Value, 0)
+
+	for _, arg := range args {
+		reflectedArgs = append(reflectedArgs, reflect.ValueOf(arg))
+	}
+
+	return reflectedArgs
+}
+
+// New creates new MessageBus
+// handlerQueueSize sets buffered channel length per subscriber
+// It panics when handlerQueueSize is 0 (an unbuffered queue is not
+// supported by this implementation).
+func New(handlerQueueSize int) MessageBus {
+	if handlerQueueSize == 0 {
+		panic("handlerQueueSize has to be greater then 0")
+	}
+
+	return &messageBus{
+		handlerQueueSize: handlerQueueSize,
+		handlers:         make(handlersMap),
+	}
+}
diff --git a/test/integration/vendor/github.com/vardius/message-bus/doc.go b/test/integration/vendor/github.com/vardius/message-bus/doc.go
new file mode 100644
index 000000000..14e972301
--- /dev/null
+++ b/test/integration/vendor/github.com/vardius/message-bus/doc.go
@@ -0,0 +1,4 @@
+/*
+Package messagebus provides simple async message publisher
+*/
+package messagebus
diff --git a/test/integration/vendor/golang.org/x/text/runes/cond.go b/test/integration/vendor/golang.org/x/text/runes/cond.go
new file mode 100644
index 000000000..df7aa02db
--- /dev/null
+++ b/test/integration/vendor/golang.org/x/text/runes/cond.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runes
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
+// This is done for various reasons:
+// - To retain the semantics of the Nop transformer: if input is passed to a Nop
+// one would expect it to be unchanged.
+// - It would be very expensive to pass a converted RuneError to a transformer:
+// a transformer might need more source bytes after RuneError, meaning that
+// the only way to pass it safely is to create a new buffer and manage the
+// intermingling of RuneErrors and normal input.
+// - Many transformers leave ill-formed UTF-8 as is, so this is not
+// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
+// logical consequence of the operation (as for Map) or if it otherwise would
+// pose security concerns (as for Remove).
+// - An alternative would be to return an error on ill-formed UTF-8, but this
+// would be inconsistent with other operations.
+
+// If returns a transformer that applies tIn to consecutive runes for which
+// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
+// is called on tIn and tNotIn at the start of each run. A Nop transformer will
+// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
+// to RuneError to determine which transformer to apply, but is passed as is to
+// the respective transformer.
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
+	// Both nil: nothing to do, degrade to a plain Nop.
+	if tIn == nil && tNotIn == nil {
+		return Transformer{transform.Nop}
+	}
+	if tIn == nil {
+		tIn = transform.Nop
+	}
+	if tNotIn == nil {
+		tNotIn = transform.Nop
+	}
+	// Wrap plain Transformers so both sides support Span.
+	sIn, ok := tIn.(transform.SpanningTransformer)
+	if !ok {
+		sIn = dummySpan{tIn}
+	}
+	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
+	if !ok {
+		sNotIn = dummySpan{tNotIn}
+	}
+
+	a := &cond{
+		tIn:    sIn,
+		tNotIn: sNotIn,
+		f:      s.Contains,
+	}
+	a.Reset()
+	return Transformer{a}
+}
+
+// dummySpan upgrades a plain transform.Transformer to a
+// SpanningTransformer whose Span never matches anything, forcing all
+// input through Transform.
+type dummySpan struct{ transform.Transformer }
+
+func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
+	return 0, transform.ErrEndOfSpan
+}
+
+// cond alternates between tIn and tNotIn depending on whether each rune
+// satisfies f. check/t always describe the current run's state.
+type cond struct {
+	tIn, tNotIn transform.SpanningTransformer
+	f           func(rune) bool
+	check       func(rune) bool               // current check to perform
+	t           transform.SpanningTransformer // current transformer to use
+}
+
+// Reset implements transform.Transformer.
+func (t *cond) Reset() {
+	t.check = t.is
+	t.t = t.tIn
+	t.t.Reset() // notIn will be reset on first usage.
+}
+
+// is is the check used while inside an "in the set" run; a miss flips
+// state to the not-in transformer (resetting it) and returns false.
+func (t *cond) is(r rune) bool {
+	if t.f(r) {
+		return true
+	}
+	t.check = t.isNot
+	t.t = t.tNotIn
+	t.tNotIn.Reset()
+	return false
+}
+
+// isNot is the mirror image of is, used during "not in the set" runs.
+func (t *cond) isNot(r rune) bool {
+	if !t.f(r) {
+		return true
+	}
+	t.check = t.is
+	t.t = t.tIn
+	t.tIn.Reset()
+	return false
+}
+
+// This implementation of Span doesn't help all too much, but it needs to be
+// there to satisfy this package's Transformer interface.
+// TODO: there are certainly room for improvements, though. For example, if
+// t.t == transform.Nop (which will a common occurrence) it will save a bundle
+// to special-case that loop.
+func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
+	p := 0
+	for n < len(src) && err == nil {
+		// Don't process too much at a time as the Spanner that will be
+		// called on this block may terminate early.
+		const maxChunk = 4096
+		max := len(src)
+		if v := n + maxChunk; v < max {
+			max = v
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		// Advance p to the end of the current run (runes for which
+		// t.check keeps returning true).
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				// Possibly a truncated rune at the end of the input.
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
+				atEnd = true
+				break
+			}
+		}
+		// Let the run's transformer span the bytes of this run.
+		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
+		n += n2
+		if err2 != nil {
+			return n, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = n + size
+	}
+	return n, err
+}
+
+// Transform dispatches consecutive runs of src to the appropriate
+// sub-transformer (tIn or tNotIn), switching whenever t.check flips.
+// Structure mirrors Span above.
+func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	p := 0
+	for nSrc < len(src) && err == nil {
+		// Don't process too much at a time, as the work might be wasted if the
+		// destination buffer isn't large enough to hold the result or a
+		// transform returns an error early.
+		const maxChunk = 4096
+		max := len(src)
+		if n := nSrc + maxChunk; n < len(src) {
+			max = n
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		// Advance p to the end of the current run.
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				// Possibly a truncated rune at the end of the input.
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
+				atEnd = true
+				break
+			}
+		}
+		// Transform the bytes of this run with its transformer.
+		nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
+		nDst += nDst2
+		nSrc += nSrc2
+		if err2 != nil {
+			return nDst, nSrc, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = nSrc + size
+	}
+	return nDst, nSrc, err
+}
diff --git a/test/integration/vendor/golang.org/x/text/runes/runes.go b/test/integration/vendor/golang.org/x/text/runes/runes.go
new file mode 100644
index 000000000..930e87fed
--- /dev/null
+++ b/test/integration/vendor/golang.org/x/text/runes/runes.go
@@ -0,0 +1,355 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package runes provide transforms for UTF-8 encoded text.
+package runes // import "golang.org/x/text/runes"
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// A Set is a collection of runes.
+type Set interface {
+ // Contains returns true if r is contained in the set.
+ Contains(r rune) bool
+}
+
+type setFunc func(rune) bool
+
+func (s setFunc) Contains(r rune) bool {
+ return s(r)
+}
+
+// Note: using funcs here instead of wrapping types result in cleaner
+// documentation and a smaller API.
+
+// In creates a Set with a Contains method that returns true for all runes in
+// the given RangeTable.
+func In(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
+}
+
+// NotIn creates a Set with a Contains method that returns true for all runes not
+// in the given RangeTable.
+func NotIn(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
+}
+
+// Predicate creates a Set with a Contains method that returns f(r).
+func Predicate(f func(rune) bool) Set {
+ return setFunc(f)
+}
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+ t transform.SpanningTransformer
+}
+
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ return t.t.Transform(dst, src, atEOF)
+}
+
+func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
+ return t.t.Span(b, atEOF)
+}
+
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Bytes returns a new byte slice with the result of converting b using t. It
+// calls Reset on t. It returns nil if any error was found. This can only happen
+// if an error-producing Transformer is passed to If.
+func (t Transformer) Bytes(b []byte) []byte {
+ b, _, err := transform.Bytes(t, b)
+ if err != nil {
+ return nil
+ }
+ return b
+}
+
+// String returns a string with the result of converting s using t. It calls
+// Reset on t. It returns the empty string if any error was found. This can only
+// happen if an error-producing Transformer is passed to If.
+func (t Transformer) String(s string) string {
+ s, _, err := transform.String(t, s)
+ if err != nil {
+ return ""
+ }
+ return s
+}
+
+// TODO:
+// - Copy: copying strings and bytes in whole-rune units.
+// - Validation (maybe)
+// - Well-formed-ness (maybe)
+
+const runeErrorString = string(utf8.RuneError)
+
+// Remove returns a Transformer that removes runes r for which s.Contains(r).
+// Illegal input bytes are replaced by RuneError before being passed to f.
+func Remove(s Set) Transformer {
+ if f, ok := s.(setFunc); ok {
+ // This little trick cuts the running time of BenchmarkRemove for sets
+ // created by Predicate roughly in half.
+ // TODO: special-case RangeTables as well.
+ return Transformer{remove(f)}
+ }
+ return Transformer{remove(s.Contains)}
+}
+
+// TODO: remove transform.RemoveFunc.
+
+type remove func(r rune) bool
+
+func (remove) Reset() {}
+
+// Span implements transform.Spanner.
+func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return
+}
+
+// Transform implements transform.Transformer.
+func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ // We replace illegal bytes with RuneError. Not doing so might
+ // otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+ // The resulting byte sequence may subsequently contain runes
+ // for which t(r) is true that were passed unnoticed.
+ if !t(utf8.RuneError) {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ }
+ nSrc++
+ continue
+ }
+ if t(r) {
+ nSrc += size
+ continue
+ }
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ }
+ return
+}
+
+// Map returns a Transformer that maps the runes in the input using the given
+// mapping. Illegal bytes in the input are converted to utf8.RuneError before
+// being passed to the mapping func.
+func Map(mapping func(rune) rune) Transformer {
+ return Transformer{mapper(mapping)}
+}
+
+type mapper func(rune) rune
+
+func (mapper) Reset() {}
+
+// Span implements transform.Spanner.
+func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); n += size {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) != r {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ }
+ return n, err
+}
+
+// Transform implements transform.Transformer.
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ var replacement rune
+ var b [utf8.UTFMax]byte
+
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ if replacement = t(r); replacement < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = byte(replacement)
+ nDst++
+ nSrc++
+ continue
+ }
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ continue
+ }
+ } else if replacement = t(r); replacement == r {
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ continue
+ }
+
+ n := utf8.EncodeRune(b[:], replacement)
+
+ if nDst+n > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < n; i++ {
+ dst[nDst] = b[i]
+ nDst++
+ }
+ nSrc += size
+ }
+ return
+}
+
+// ReplaceIllFormed returns a transformer that replaces all input bytes that are
+// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
+func ReplaceIllFormed() Transformer {
+ return Transformer{&replaceIllFormed{}}
+}
+
+type replaceIllFormed struct{ transform.NopResetter }
+
+func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ // ASCII fast path.
+ if src[n] < utf8.RuneSelf {
+ n++
+ continue
+ }
+
+ r, size := utf8.DecodeRune(src[n:])
+
+ // Look for a valid non-ASCII rune.
+ if r != utf8.RuneError || size != 1 {
+ n += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ err = transform.ErrEndOfSpan
+ break
+ }
+ return n, err
+}
+
+func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ // ASCII fast path.
+ if r := src[nSrc]; r < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = r
+ nDst++
+ nSrc++
+ continue
+ }
+
+ // Look for a valid non-ASCII rune.
+ if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += size
+ nSrc += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ }
+ return nDst, nSrc, err
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/.editorconfig b/test/integration/vendor/gopkg.in/ini.v1/.editorconfig
new file mode 100644
index 000000000..4a2d9180f
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/.editorconfig
@@ -0,0 +1,12 @@
+# http://editorconfig.org
+
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*_test.go]
+trim_trailing_whitespace = false
diff --git a/test/integration/vendor/gopkg.in/ini.v1/.gitignore b/test/integration/vendor/gopkg.in/ini.v1/.gitignore
new file mode 100644
index 000000000..588388bda
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/.gitignore
@@ -0,0 +1,7 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
+.DS_Store
diff --git a/test/integration/vendor/gopkg.in/ini.v1/.golangci.yml b/test/integration/vendor/gopkg.in/ini.v1/.golangci.yml
new file mode 100644
index 000000000..631e36925
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/.golangci.yml
@@ -0,0 +1,27 @@
+linters-settings:
+ staticcheck:
+ checks: [
+ "all",
+ "-SA1019" # There are valid use cases of strings.Title
+ ]
+ nakedret:
+ max-func-lines: 0 # Disallow any unnamed return statement
+
+linters:
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - nakedret
+ - gofmt
+ - rowserrcheck
+ - unconvert
+ - goimports
+ - unparam
diff --git a/test/integration/vendor/gopkg.in/ini.v1/LICENSE b/test/integration/vendor/gopkg.in/ini.v1/LICENSE
new file mode 100644
index 000000000..d361bbcdf
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/test/integration/vendor/gopkg.in/ini.v1/Makefile b/test/integration/vendor/gopkg.in/ini.v1/Makefile
new file mode 100644
index 000000000..f3b0dae2d
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/test/integration/vendor/gopkg.in/ini.v1/README.md b/test/integration/vendor/gopkg.in/ini.v1/README.md
new file mode 100644
index 000000000..30606d970
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/README.md
@@ -0,0 +1,43 @@
+# INI
+
+[](https://github.com/go-ini/ini/actions?query=branch%3Amain)
+[](https://codecov.io/gh/go-ini/ini)
+[](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[](https://sourcegraph.com/github.com/go-ini/ini)
+
+
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.13**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- 中国大陆镜像:https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/test/integration/vendor/gopkg.in/ini.v1/codecov.yml b/test/integration/vendor/gopkg.in/ini.v1/codecov.yml
new file mode 100644
index 000000000..e02ec84bc
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/codecov.yml
@@ -0,0 +1,16 @@
+coverage:
+ range: "60...95"
+ status:
+ project:
+ default:
+ threshold: 1%
+ informational: true
+ patch:
+ defualt:
+ only_pulls: true
+ informational: true
+
+comment:
+ layout: 'diff'
+
+github_checks: false
diff --git a/test/integration/vendor/gopkg.in/ini.v1/data_source.go b/test/integration/vendor/gopkg.in/ini.v1/data_source.go
new file mode 100644
index 000000000..c3a541f1d
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/data_source.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+var (
+ _ dataSource = (*sourceFile)(nil)
+ _ dataSource = (*sourceData)(nil)
+ _ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ case io.Reader:
+ return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+ }
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/deprecated.go b/test/integration/vendor/gopkg.in/ini.v1/deprecated.go
new file mode 100644
index 000000000..48b8e66d6
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/deprecated.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+var (
+ // Deprecated: Use "DefaultSection" instead.
+ DEFAULT_SECTION = DefaultSection
+ // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore = SnackCase
+)
diff --git a/test/integration/vendor/gopkg.in/ini.v1/error.go b/test/integration/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 000000000..f66bc94b8
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,49 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
+
+// ErrEmptyKeyName indicates the error type of no key name is found which there should be one.
+type ErrEmptyKeyName struct {
+ Line string
+}
+
+// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName.
+func IsErrEmptyKeyName(err error) bool {
+ _, ok := err.(ErrEmptyKeyName)
+ return ok
+}
+
+func (err ErrEmptyKeyName) Error() string {
+ return fmt.Sprintf("empty key name: %s", err.Line)
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/file.go b/test/integration/vendor/gopkg.in/ini.v1/file.go
new file mode 100644
index 000000000..f8b22408b
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/file.go
@@ -0,0 +1,541 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // To keep track of the index of a section with same name.
+ // This meta list is only used when non-unique section names are allowed.
+ sectionIndexes []int
+
+ // Actual data is stored here.
+ sections map[string][]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ if len(opts.KeyValueDelimiterOnWrite) == 0 {
+ opts.KeyValueDelimiterOnWrite = "="
+ }
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
+
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string][]*Section),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty(opts ...LoadOptions) *File {
+ var opt LoadOptions
+ if len(opts) > 0 {
+ opt = opts[0]
+ }
+
+ // Ignore error here, we are sure our data is good.
+ f, _ := LoadSources(opt, []byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("empty section name")
+ }
+
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
+ return f.sections[name][0], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+
+ // NOTE: Append to indexes must happen before appending to sections,
+ // otherwise index will have off-by-one problem.
+ f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
+
+ sec := newSection(f, name)
+ f.sections[name] = append(f.sections[name], sec)
+
+ return sec, nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return secs[0], err
+}
+
+// HasSection returns true if the file contains a section with given name.
+func (f *File) HasSection(name string) bool {
+ section, _ := f.GetSection(name)
+ return section != nil
+}
+
+// SectionsByName returns all sections with given name.
+func (f *File) SectionsByName(name string) ([]*Section, error) {
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ secs := f.sections[name]
+ if len(secs) == 0 {
+ return nil, fmt.Errorf("section %q does not exist", name)
+ }
+
+ return secs, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ if name == "" {
+ name = DefaultSection
+ }
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// SectionWithIndex assumes named section exists and returns a new section when not.
+func (f *File) SectionWithIndex(name string, index int) *Section {
+ secs, err := f.SectionsByName(name)
+ if err != nil || len(secs) <= index {
+ // NOTE: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ newSec, _ := f.NewSection(name)
+ return newSec
+ }
+
+ return secs[index]
+}
+
+// Sections returns a list of Section stored in the current instance.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name][f.sectionIndexes[i]]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section or all sections with given name.
+func (f *File) DeleteSection(name string) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(secs); i++ {
+ // For non-unique sections, we always remove the first one so that in
+ // the next iteration the subsequent section continues to have index 0.
+ // Ignoring the error as index 0 never returns an error.
+ _ = f.DeleteSectionWithIndex(name, 0)
+ }
+}
+
+// DeleteSectionWithIndex deletes a section with given name and index.
+func (f *File) DeleteSectionWithIndex(name string, index int) error {
+ if !f.options.AllowNonUniqueSections && index != 0 {
+ return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
+ }
+
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ // Count occurrences of the sections
+ occurrences := 0
+
+ sectionListCopy := make([]string, len(f.sectionList))
+ copy(sectionListCopy, f.sectionList)
+
+ for i, s := range sectionListCopy {
+ if s != name {
+ continue
+ }
+
+ if occurrences == index {
+ if len(f.sections[name]) <= 1 {
+ delete(f.sections, name) // The last one in the map
+ } else {
+ f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
+ }
+
+ // Fix section lists
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
+
+ } else if occurrences > index {
+ // Fix the indices of all following sections with this name.
+ f.sectionIndexes[i-1]--
+ }
+
+ occurrences++
+ }
+
+ return nil
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ _ = f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ if f.options.ShortCircuit {
+ return nil
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
+
+ if PrettyFormat || PrettyEqual {
+ equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ lastSectionIdx := len(f.sectionList) - 1
+ for i, sname := range f.sectionList {
+ sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
+ if len(sec.Comment) > 0 {
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ isLastSection := i == lastSectionIdx
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection && !isLastSection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+ // longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KeyList:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ // Support multiline comments
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + strings.TrimSpace(lines[i])
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ writeKeyValue := func(val string) (bool, error) {
+ if _, err := buf.WriteString(kname); err != nil {
+ return false, err
+ }
+
+ if key.isBooleanType {
+ buf.WriteString(LineBreak)
+ return true, nil
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ } else if len(strings.TrimSpace(val)) != len(val) {
+ val = `"` + val + `"`
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return false, err
+ }
+ return false, nil
+ }
+
+ shadows := key.ValueWithShadows()
+ if len(shadows) == 0 {
+ if _, err := writeKeyValue(""); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range shadows {
+ exitLoop, err := writeKeyValue(val)
+ if err != nil {
+ return nil, err
+ } else if exitLoop {
+ continue KeyList
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection && !isLastSection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+ // Note: Because we are truncating with os.Create,
+ // it is safer to save to a temporary file location and rename when done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/helper.go b/test/integration/vendor/gopkg.in/ini.v1/helper.go
new file mode 100644
index 000000000..f9d80a682
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/helper.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/ini.go b/test/integration/vendor/gopkg.in/ini.v1/ini.go
new file mode 100644
index 000000000..99e7f8651
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/ini.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+const (
+ // Maximum allowed depth when recursively substituting variable names.
+ depthValues = 99
+)
+
+var (
+ // DefaultSection is the name of default section. You can use this var or the string literal.
+ // In most of cases, an empty string is all you need to access the section.
+ DefaultSection = "DEFAULT"
+
+ // LineBreak is the delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+ // DefaultHeader explicitly writes default section header.
+ DefaultHeader = false
+
+ // PrettySection indicates whether to put a line between sections.
+ PrettySection = true
+ // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+ // PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+ PrettyEqual = false
+ // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatLeft = ""
+ // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatRight = ""
+)
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+ if runtime.GOOS == "windows" && !inTest {
+ LineBreak = "\r\n"
+ }
+}
+
+// LoadOptions contains all customized options used for load data source(s).
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // InsensitiveSections indicates whether the parser forces all section to lowercase.
+ InsensitiveSections bool
+ // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+ InsensitiveKeys bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+ IgnoreInlineComment bool
+ // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+ SkipUnrecognizableLines bool
+ // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source.
+ ShortCircuit bool
+ // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
+ // This type of keys are mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with same name under same section.
+ AllowShadows bool
+ // AllowNestedValues indicates whether to allow AWS-like nested values.
+ // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+ AllowNestedValues bool
+ // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+ // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+ // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+ // than the first line of the value.
+ AllowPythonMultilineValues bool
+ // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+ // Docs: https://docs.python.org/2/library/configparser.html
+ // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+ SpaceBeforeInlineComment bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+ // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
+ // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+ // when value is NOT surrounded by any quotes.
+ // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+ UnescapeValueCommentSymbols bool
+ // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+ // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+ KeyValueDelimiters string
+ // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=".
+ KeyValueDelimiterOnWrite string
+ // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+ ChildSectionDelimiter string
+ // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+ PreserveSurroundedQuote bool
+ // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+ DebugFunc DebugFunc
+ // ReaderBufferSize is the buffer size of the reader in bytes.
+ ReaderBufferSize int
+ // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+ AllowNonUniqueSections bool
+ // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated.
+ AllowDuplicateShadowValues bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly same functionality as Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly same functionality as Load function
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/key.go b/test/integration/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 000000000..a19d9f38e
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,837 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ nestedValues []string
+}
+
+// newKey simply return a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ if !k.s.f.options.AllowDuplicateShadowValues {
+ // Deduplicate shadows based on their values.
+ if k.value == val {
+ return nil
+ }
+ for i := range k.shadows {
+ if k.shadows[i].value == val {
+ return nil
+ }
+ }
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+ if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add nested value to auto-increment or boolean key")
+ }
+
+ k.nestedValues = append(k.nestedValues, val)
+ return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+ if !k.s.f.options.AllowNestedValues {
+ return errors.New("nested value is not allowed")
+ }
+ return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any. Shadow
+// keys with empty values are ignored from the returned list.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ if k.value == "" {
+ return []string{}
+ }
+ return []string{k.value}
+ }
+
+ vals := make([]string, 0, len(k.shadows)+1)
+ if k.value != "" {
+ vals = append(vals, k.value)
+ }
+ for _, s := range k.shadows {
+ if s.value != "" {
+ vals = append(vals, s.value)
+ }
+ }
+ return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// It is possible returned value is nil if no nested values stored in the key.
+func (k *Key) NestedValues() []string {
+ return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms to its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+ // Fail-fast if no indicate char found for recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
+ for i := 0; i < depthValues; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := vr[2 : len(vr)-2]
+
+ // Search in the same section.
+ // If not found or found the key itself, then search again in default section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil || k == nk {
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ if nk == nil {
+ // Stop when no results found in the default section,
+ // and returns the value as-is.
+ break
+ }
+ }
+
+ // Substitute by new value and take off leading '%(' and trailing ')s'.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ v, err := strconv.ParseInt(k.String(), 0, 64)
+ return int(v), err
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type valued.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 0, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ runes := []rune(str)
+ vals := make([]string, 0, 2)
+ var buf bytes.Buffer
+ escape := false
+ idx := 0
+ for {
+ if escape {
+ escape = false
+ if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+ buf.WriteRune('\\')
+ }
+ buf.WriteRune(runes[idx])
+ } else {
+ if runes[idx] == '\\' {
+ escape = true
+ } else if strings.HasPrefix(string(runes[idx:]), delim) {
+ idx += len(delim) - 1
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ buf.Reset()
+ } else {
+ buf.WriteRune(runes[idx])
+ }
+ }
+ idx++
+ if idx == len(runes) {
+ break
+ }
+ }
+
+ if buf.Len() > 0 {
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ }
+
+ return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+ if len(vals) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Bools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidBools returns list of bool divided by given delimiter. If some value is not a valid bool,
+// then it will not be included to result list.
+func (k *Key) ValidBools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
+func (k *Key) StrictBools(delim string) ([]bool, error) {
+ return k.parseBools(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseBools transforms strings to bools.
+func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
+ vals := make([]bool, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := parseBool(str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(bool))
+ }
+ }
+ return vals, err
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseFloat(str, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(float64))
+ }
+ }
+ return vals, err
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, int(val.(int64)))
+ }
+ }
+ return vals, err
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(int64))
+ }
+ }
+ return vals, err
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, uint(val.(uint64)))
+ }
+ }
+ return vals, err
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(uint64))
+ }
+ }
+ return vals, err
+}
+
+type Parser func(str string) (interface{}, error)
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := time.Parse(format, str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(time.Time))
+ }
+ }
+ return vals, err
+}
+
+// doParse transforms strings to different types using the given parser.
+func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
+ vals := make([]interface{}, 0, len(strs))
+ for _, str := range strs {
+ val, err := parser(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/parser.go b/test/integration/vendor/gopkg.in/ini.v1/parser.go
new file mode 100644
index 000000000..44fc526c2
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/parser.go
@@ -0,0 +1,520 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+const minReaderBufferSize = 4096
+
+var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
+
+type parserOptions struct {
+ IgnoreContinuation bool
+ IgnoreInlineComment bool
+ AllowPythonMultilineValues bool
+ SpaceBeforeInlineComment bool
+ UnescapeValueDoubleQuotes bool
+ UnescapeValueCommentSymbols bool
+ PreserveSurroundedQuote bool
+ DebugFunc DebugFunc
+ ReaderBufferSize int
+}
+
+type parser struct {
+ buf *bufio.Reader
+ options parserOptions
+
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func (p *parser) debug(format string, args ...interface{}) {
+ if p.options.DebugFunc != nil {
+ p.options.DebugFunc(fmt.Sprintf(format, args...))
+ }
+}
+
+func newParser(r io.Reader, opts parserOptions) *parser {
+ size := opts.ReaderBufferSize
+ if size < minReaderBufferSize {
+ size = minReaderBufferSize
+ }
+
+ return &parser{
+ buf: bufio.NewReaderSize(r, size),
+ options: opts,
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(delimiters string, in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && line[0:3] == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ var endIdx int
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], delimiters)
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, delimiters)
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ if endIdx == 0 {
+ return "", -1, ErrEmptyKeyName{line}
+ }
+
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote checks if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other parts also contain same kind of quotes.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && line[0:3] == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !p.options.IgnoreInlineComment {
+ var i int
+ if p.options.SpaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if (hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
+ line = strings.ReplaceAll(line, `\;`, ";")
+ line = strings.ReplaceAll(line, `\#`, "#")
+ } else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+
+ return line, nil
+}
+
+func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
+ parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil && peekErr != io.EOF {
+ p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
+ return "", peekErr
+ }
+
+ p.debug("readPythonMultilines: parsing %q", string(peekData))
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
+ for n, v := range peekMatches {
+ p.debug(" %d: %q", n, v)
+ }
+
+ // Return if not a Python multiline value.
+ if len(peekMatches) != 3 {
+ p.debug("readPythonMultilines: end of value, got: %q", line)
+ return line, nil
+ }
+
+ // Advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.buf.Discard(len(peekData))
+ if err != nil {
+ p.debug("readPythonMultilines: failed to skip to the end, returning error")
+ return "", err
+ }
+
+ line += "\n" + peekMatches[0]
+ }
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader, parserOptions{
+ IgnoreContinuation: f.options.IgnoreContinuation,
+ IgnoreInlineComment: f.options.IgnoreInlineComment,
+ AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
+ SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
+ UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
+ UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
+ PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
+ DebugFunc: f.options.DebugFunc,
+ ReaderBufferSize: f.options.ReaderBufferSize,
+ })
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DefaultSection
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(DefaultSection)
+ }
+ section, _ := f.NewSection(name)
+
+ // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 4kb at a time.
+ currentPeekSize := minReaderBufferSize
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we do not care ending line break,
+ // it is needed for adding second line,
+ // so just clean it once at the end when set to value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+ // Nested values can't span sections
+ isLastValueEmpty = false
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+ if err != nil {
+ switch {
+ // Treat as boolean key when desired, and whole line is key name.
+ case IsErrDelimiterNotFound(err):
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line, parserBufferSize)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines:
+ continue
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], parserBufferSize)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/section.go b/test/integration/vendor/gopkg.in/ini.v1/section.go
new file mode 100644
index 000000000..a3615d820
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/section.go
@@ -0,0 +1,256 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+ if !s.isRawSection {
+ return
+ }
+ s.rawBody = body
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ s.keysHash[name] = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ }
+ break
+ }
+ return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Deprecated: Use "HasKey" instead.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := make(map[string]string, len(s.keysHash))
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ delete(s.keysHash, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + s.f.options.ChildSectionDelimiter
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name]...)
+ }
+ }
+ return children
+}
diff --git a/test/integration/vendor/gopkg.in/ini.v1/struct.go b/test/integration/vendor/gopkg.in/ini.v1/struct.go
new file mode 100644
index 000000000..a486b2fe0
--- /dev/null
+++ b/test/integration/vendor/gopkg.in/ini.v1/struct.go
@@ -0,0 +1,747 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents a ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // SnackCase converts to format SNACK_CASE.
+ SnackCase NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= 'A' - 'a'
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflect.Bool:
+ vals, err = key.parseBools(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if err != nil && isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflect.Bool:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return error for failing parsing,
+// because we want to use default value that is already assigned to struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ vt := t
+ isPtr := t.Kind() == reflect.Ptr
+ if isPtr {
+ vt = t.Elem()
+ }
+ switch vt.Kind() {
+ case reflect.String:
+ stringVal := key.String()
+ if isPtr {
+ field.Set(reflect.ValueOf(&stringVal))
+ } else if len(stringVal) > 0 {
+ field.SetString(key.String())
+ }
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&boolVal))
+ } else {
+ field.SetBool(boolVal)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // ParseDuration will not return err for `0`, so check the type name
+ if vt.Name() == "Duration" {
+ durationVal, err := key.Duration()
+ if err != nil {
+ if intVal, err := key.Int64(); err == nil {
+ field.SetInt(intVal)
+ return nil
+ }
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else if int64(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetInt(intVal)
+ field.Set(pv)
+ } else {
+ field.SetInt(intVal)
+ }
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && uint64(durationVal) > 0 {
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetUint(uintVal)
+ field.Set(pv)
+ } else {
+ field.SetUint(uintVal)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetFloat(floatVal)
+ field.Set(pv)
+ } else {
+ field.SetFloat(floatVal)
+ }
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&timeVal))
+ } else {
+ field.Set(reflect.ValueOf(timeVal))
+ }
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
+ opts := strings.SplitN(tag, ",", 5)
+ rawName = opts[0]
+ for _, opt := range opts[1:] {
+ omitEmpty = omitEmpty || (opt == "omitempty")
+ allowShadow = allowShadow || (opt == "allowshadow")
+ allowNonUnique = allowNonUnique || (opt == "nonunique")
+ extends = extends || (opt == "extends")
+ }
+ return rawName, omitEmpty, allowShadow, allowNonUnique, extends
+}
+
+// mapToField maps the given value to the matching field of the given section.
+// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
+ isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ if isAnonymousPtr {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ fieldSection := s
+ if rawName != "" {
+ sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
+ if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
+ fieldSection = secs[sectionIndex]
+ }
+ }
+ if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ } else if isAnonymousPtr || isStruct || isStructPtr {
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
+ // Only set the field to non-nil struct value if we have a section for it.
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data.
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ // Map non-unique sections
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(fieldName, field, isStrict)
+ if err != nil {
+ return fmt.Errorf("map to slice %q: %v", fieldName, err)
+ }
+
+ field.Set(newField)
+ continue
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("set field %q: %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// mapToSlice maps all sections with the same name and returns the new value.
+// The type of the Value must be a slice.
+func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
+ secs, err := s.f.SectionsByName(secName)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ typ := val.Type().Elem()
+ for i, sec := range secs {
+ elem := reflect.New(typ)
+ if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
+ return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
+ }
+
+ val = reflect.Append(val, elem.Elem())
+ }
+ return val, nil
+}
+
+// mapTo maps a section to object v.
+func (s *Section) mapTo(v interface{}, isStrict bool) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ if typ.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(s.name, val, isStrict)
+ if err != nil {
+ return err
+ }
+
+ val.Set(newField)
+ return nil
+ }
+
+ return s.mapToField(val, isStrict, 0, s.name)
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ return s.mapTo(v, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+ return s.mapTo(v, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ if allowShadow {
+ var keyWithShadows *Key
+ for i := 0; i < field.Len(); i++ {
+ var val string
+ switch sliceOf {
+ case reflect.String:
+ val = slice.Index(i).String()
+ case reflect.Int, reflect.Int64:
+ val = fmt.Sprint(slice.Index(i).Int())
+ case reflect.Uint, reflect.Uint64:
+ val = fmt.Sprint(slice.Index(i).Uint())
+ case reflect.Float64:
+ val = fmt.Sprint(slice.Index(i).Float())
+ case reflect.Bool:
+ val = fmt.Sprint(slice.Index(i).Bool())
+ case reflectTime:
+ val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ if i == 0 {
+ keyWithShadows = newKey(key.s, key.name, val)
+ } else {
+ _ = keyWithShadows.AddShadow(val)
+ }
+ }
+ *key = *keyWithShadows
+ return nil
+ }
+
+ var buf bytes.Buffer
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflect.Bool:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-len(delim)])
+ return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim, allowShadow)
+ case reflect.Ptr:
+ if !field.IsNil() {
+ return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
+ }
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications of time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
+
+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+ ReflectINIStruct(*File) error
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ if !val.Field(i).CanInterface() {
+ continue
+ }
+
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ if omitEmpty && isEmptyValue(field) {
+ continue
+ }
+
+ if r, ok := field.Interface().(StructReflector); ok {
+ return r.ReflectINIStruct(s.f)
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
+ if err := s.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ for i := 0; i < field.Len(); i++ {
+ if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
+ return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
+ }
+
+ sec, err := s.f.NewSection(fieldName)
+ if err != nil {
+ return err
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err := sec.reflectFrom(slice.Index(i)); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("reflect field %q: %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct. It overwrites existing ones.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+
+ if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
+ (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
+ // Clear sections to make sure none exists before adding the new ones
+ s.f.DeleteSection(s.name)
+
+ if typ.Kind() == reflect.Ptr {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+ return sec.reflectFrom(val.Elem())
+ }
+
+ slice := val.Slice(0, val.Len())
+ sliceOf := val.Type().Elem().Kind()
+ if sliceOf != reflect.Ptr {
+ return fmt.Errorf("not a slice of pointers")
+ }
+
+ for i := 0; i < slice.Len(); i++ {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+
+ err = sec.reflectFrom(slice.Index(i))
+ if err != nil {
+ return fmt.Errorf("reflect from %dth field: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
+ if typ.Kind() == reflect.Ptr {
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/test/integration/vendor/modules.txt b/test/integration/vendor/modules.txt
index 341bffff5..feb7d094d 100644
--- a/test/integration/vendor/modules.txt
+++ b/test/integration/vendor/modules.txt
@@ -1,5 +1,3 @@
-# cloud.google.com/go/compute/metadata v0.2.1
-## explicit; go 1.19
# github.com/AlecAivazis/survey/v2 v2.3.6
## explicit; go 1.13
github.com/AlecAivazis/survey/v2
@@ -225,6 +223,9 @@ github.com/docker/go-units
# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
+# github.com/fsnotify/fsnotify v1.6.0
+## explicit; go 1.16
+github.com/fsnotify/fsnotify
# github.com/fvbommel/sortorder v1.0.2
## explicit; go 1.13
github.com/fvbommel/sortorder
@@ -298,6 +299,10 @@ github.com/gorilla/mux
# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
## explicit; go 1.14
github.com/grpc-ecosystem/go-grpc-middleware
+github.com/grpc-ecosystem/go-grpc-middleware/retry
+github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
+github.com/grpc-ecosystem/go-grpc-middleware/validator
# github.com/grpc-ecosystem/grpc-gateway v1.16.0
## explicit; go 1.14
github.com/grpc-ecosystem/grpc-gateway/internal
@@ -312,6 +317,18 @@ github.com/hashicorp/go-multierror
# github.com/hashicorp/go-version v1.6.0
## explicit
github.com/hashicorp/go-version
+# github.com/hashicorp/hcl v1.0.0
+## explicit
+github.com/hashicorp/hcl
+github.com/hashicorp/hcl/hcl/ast
+github.com/hashicorp/hcl/hcl/parser
+github.com/hashicorp/hcl/hcl/printer
+github.com/hashicorp/hcl/hcl/scanner
+github.com/hashicorp/hcl/hcl/strconv
+github.com/hashicorp/hcl/hcl/token
+github.com/hashicorp/hcl/json/parser
+github.com/hashicorp/hcl/json/scanner
+github.com/hashicorp/hcl/json/token
# github.com/imdario/mergo v0.3.13
## explicit; go 1.13
github.com/imdario/mergo
@@ -335,8 +352,12 @@ github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/kr/pretty v0.3.1
-## explicit; go 1.12
+# github.com/klauspost/cpuid/v2 v2.1.0
+## explicit; go 1.15
+github.com/klauspost/cpuid/v2
+# github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281
+## explicit; go 1.16
+github.com/lufia/plan9stats
# github.com/magiconair/properties v1.8.7
## explicit; go 1.19
github.com/magiconair/properties
@@ -452,8 +473,24 @@ github.com/modern-go/reflect2
# github.com/morikuni/aec v1.0.0
## explicit
github.com/morikuni/aec
+# github.com/nginx/agent/sdk/v2 v2.0.0-00010101000000-000000000000 => ./../../sdk
+## explicit; go 1.19
+github.com/nginx/agent/sdk/v2
+github.com/nginx/agent/sdk/v2/agent/config
+github.com/nginx/agent/sdk/v2/checksum
+github.com/nginx/agent/sdk/v2/client
+github.com/nginx/agent/sdk/v2/files
+github.com/nginx/agent/sdk/v2/grpc
+github.com/nginx/agent/sdk/v2/interceptors
+github.com/nginx/agent/sdk/v2/proto
+github.com/nginx/agent/sdk/v2/proto/common
+github.com/nginx/agent/sdk/v2/proto/events
+github.com/nginx/agent/sdk/v2/zip
# github.com/nginx/agent/v2 v2.22.0
## explicit; go 1.19
+github.com/nginx/agent/v2/src/core
+github.com/nginx/agent/v2/src/core/config
+github.com/nginx/agent/v2/src/core/network
github.com/nginx/agent/v2/src/extensions/advanced-metrics/aggregator
github.com/nginx/agent/v2/src/extensions/advanced-metrics/ingester
github.com/nginx/agent/v2/src/extensions/advanced-metrics/pkg/advanced-metrics
@@ -469,6 +506,11 @@ github.com/nginx/agent/v2/src/extensions/advanced-metrics/tables/schema
github.com/nginx/agent/v2/test/integration/nginx
github.com/nginx/agent/v2/test/integration/upstream
github.com/nginx/agent/v2/test/integration/validator
+github.com/nginx/agent/v2/test/utils
+github.com/nginx/agent/v2/test/utils/system
+# github.com/nginxinc/nginx-go-crossplane v0.4.1
+## explicit; go 1.15
+github.com/nginxinc/nginx-go-crossplane
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
@@ -487,12 +529,21 @@ github.com/orcaman/concurrent-map
# github.com/pelletier/go-toml v1.9.5
## explicit; go 1.12
github.com/pelletier/go-toml
+# github.com/pelletier/go-toml/v2 v2.0.5
+## explicit; go 1.16
+github.com/pelletier/go-toml/v2
+github.com/pelletier/go-toml/v2/internal/ast
+github.com/pelletier/go-toml/v2/internal/danger
+github.com/pelletier/go-toml/v2/internal/tracker
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
+# github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c
+## explicit; go 1.14
+github.com/power-devops/perfstat
# github.com/prometheus/client_golang v1.13.0
## explicit; go 1.17
github.com/prometheus/client_golang/prometheus
@@ -514,6 +565,8 @@ github.com/prometheus/procfs/internal/util
# github.com/rivo/uniseg v0.2.0
## explicit; go 1.12
github.com/rivo/uniseg
+# github.com/rogpeppe/go-internal v1.9.0
+## explicit; go 1.17
# github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
## explicit
github.com/serialx/hashring
@@ -524,21 +577,57 @@ github.com/shirou/gopsutil/internal/common
github.com/shirou/gopsutil/mem
github.com/shirou/gopsutil/net
github.com/shirou/gopsutil/process
+# github.com/shirou/gopsutil/v3 v3.22.7
+## explicit; go 1.15
+github.com/shirou/gopsutil/v3/cpu
+github.com/shirou/gopsutil/v3/disk
+github.com/shirou/gopsutil/v3/host
+github.com/shirou/gopsutil/v3/internal/common
+github.com/shirou/gopsutil/v3/mem
+github.com/shirou/gopsutil/v3/net
+github.com/shirou/gopsutil/v3/process
# github.com/sirupsen/logrus v1.9.0
## explicit; go 1.13
github.com/sirupsen/logrus
+# github.com/spf13/afero v1.9.2
+## explicit; go 1.16
+github.com/spf13/afero
+github.com/spf13/afero/internal/common
+github.com/spf13/afero/mem
+# github.com/spf13/cast v1.5.0
+## explicit; go 1.18
+github.com/spf13/cast
# github.com/spf13/cobra v1.6.1
## explicit; go 1.15
github.com/spf13/cobra
+# github.com/spf13/jwalterweatherman v1.1.0
+## explicit
+github.com/spf13/jwalterweatherman
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
# github.com/spf13/viper v1.14.0
## explicit; go 1.17
+github.com/spf13/viper
+github.com/spf13/viper/internal/encoding
+github.com/spf13/viper/internal/encoding/dotenv
+github.com/spf13/viper/internal/encoding/hcl
+github.com/spf13/viper/internal/encoding/ini
+github.com/spf13/viper/internal/encoding/javaproperties
+github.com/spf13/viper/internal/encoding/json
+github.com/spf13/viper/internal/encoding/toml
+github.com/spf13/viper/internal/encoding/yaml
+# github.com/stretchr/objx v0.5.0
+## explicit; go 1.12
+github.com/stretchr/objx
# github.com/stretchr/testify v1.8.1
## explicit; go 1.13
github.com/stretchr/testify/assert
+github.com/stretchr/testify/mock
github.com/stretchr/testify/require
+# github.com/subosito/gotenv v1.4.1
+## explicit; go 1.18
+github.com/subosito/gotenv
# github.com/testcontainers/testcontainers-go v0.17.0
## explicit; go 1.18
github.com/testcontainers/testcontainers-go
@@ -579,6 +668,9 @@ github.com/tonistiigi/units
# github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f
## explicit; go 1.12
github.com/tonistiigi/vt100
+# github.com/vardius/message-bus v1.1.5
+## explicit; go 1.12
+github.com/vardius/message-bus
# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
## explicit
github.com/xeipuuv/gojsonpointer
@@ -708,6 +800,7 @@ golang.org/x/sys/windows/registry
golang.org/x/term
# golang.org/x/text v0.6.0
## explicit; go 1.17
+golang.org/x/text/runes
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
@@ -839,6 +932,9 @@ google.golang.org/protobuf/types/known/wrapperspb
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
+# gopkg.in/ini.v1 v1.67.0
+## explicit
+gopkg.in/ini.v1
# gopkg.in/yaml.v2 v2.4.0
## explicit; go 1.15
gopkg.in/yaml.v2
@@ -1073,3 +1169,4 @@ sigs.k8s.io/yaml
# k8s.io/api => k8s.io/api v0.22.4
# k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
# k8s.io/client-go => k8s.io/client-go v0.22.4
+# github.com/nginx/agent/sdk/v2 => ./../../sdk
diff --git a/test/performance/go.mod b/test/performance/go.mod
index 1ba2c5f45..2245d8e21 100644
--- a/test/performance/go.mod
+++ b/test/performance/go.mod
@@ -25,6 +25,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
diff --git a/test/performance/go.sum b/test/performance/go.sum
index c52e27699..f19062629 100644
--- a/test/performance/go.sum
+++ b/test/performance/go.sum
@@ -90,6 +90,7 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
+github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -449,6 +450,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/.gitignore b/test/performance/vendor/github.com/go-resty/resty/v2/.gitignore
new file mode 100644
index 000000000..9e856bd48
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/.gitignore
@@ -0,0 +1,30 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+coverage.out
+coverage.txt
+
+# Exclude intellij IDE folders
+.idea/*
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/BUILD.bazel b/test/performance/vendor/github.com/go-resty/resty/v2/BUILD.bazel
new file mode 100644
index 000000000..03bb44c3e
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/BUILD.bazel
@@ -0,0 +1,48 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@bazel_gazelle//:def.bzl", "gazelle")
+
+# gazelle:prefix github.com/go-resty/resty/v2
+# gazelle:go_naming_convention import_alias
+gazelle(name = "gazelle")
+
+go_library(
+ name = "resty",
+ srcs = [
+ "client.go",
+ "middleware.go",
+ "redirect.go",
+ "request.go",
+ "response.go",
+ "resty.go",
+ "retry.go",
+ "trace.go",
+ "transport.go",
+ "transport112.go",
+ "util.go",
+ ],
+ importpath = "github.com/go-resty/resty/v2",
+ visibility = ["//visibility:public"],
+ deps = ["@org_golang_x_net//publicsuffix:go_default_library"],
+)
+
+go_test(
+ name = "resty_test",
+ srcs = [
+ "client_test.go",
+ "context_test.go",
+ "example_test.go",
+ "request_test.go",
+ "resty_test.go",
+ "retry_test.go",
+ "util_test.go",
+ ],
+ data = glob([".testdata/*"]),
+ embed = [":resty"],
+ deps = ["@org_golang_x_net//proxy:go_default_library"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":resty",
+ visibility = ["//visibility:public"],
+)
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/LICENSE b/test/performance/vendor/github.com/go-resty/resty/v2/LICENSE
new file mode 100644
index 000000000..27326a653
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2021 Jeevanandam M., https://myjeeva.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/README.md b/test/performance/vendor/github.com/go-resty/resty/v2/README.md
new file mode 100644
index 000000000..8ec651828
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/README.md
@@ -0,0 +1,906 @@
+
+
Resty
+Simple HTTP and REST client library for Go (inspired by Ruby rest-client)
+Features section describes in detail about Resty capabilities
+
+
+
+
+
+
Resty Communication Channels
+
+
+
+## News
+
+ * v2.7.0 [released](https://github.com/go-resty/resty/releases/tag/v2.7.0) and tagged on Nov 03, 2021.
+ * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
+ * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
+ * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
+
+## Features
+
+ * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
+ * Simple and chainable methods for settings and request
+ * [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
+ * Auto detects `Content-Type`
+ * Buffer less processing for `io.Reader`
+ * Native `*http.Request` instance may be accessed during middleware and request execution via `Request.RawRequest`
+ * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
+ * [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Response) object gives you more possibility
+ * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
+ * Know your `response.Time()` and when we `response.ReceivedAt()`
+ * Automatic marshal and unmarshal for `JSON` and `XML` content type
+ * Default is `JSON`, if you supply `struct/map` without header `Content-Type`
+ * For auto-unmarshal, refer to -
+ - Success scenario [Request.SetResult()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetResult) and [Response.Result()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Result).
+ - Error scenario [Request.SetError()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetError) and [Response.Error()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Error).
+ - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
+ * Resty provides an option to override [JSON Marshal/Unmarshal and XML Marshal/Unmarshal](#override-json--xml-marshalunmarshal)
+ * Easy to upload one or more file(s) via `multipart/form-data`
+ * Auto detects file content type
+ * Request URL [Path Params (aka URI Params)](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetPathParams)
+ * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
+ * Resty client HTTP & REST [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnBeforeRequest) and [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnAfterResponse) middlewares
+ * `Request.SetContext` supported
+ * Authorization option of `BasicAuth` and `Bearer` token
+ * Set request `ContentLength` value for all request or particular request
+ * Custom [Root Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetRootCertificate) and Client [Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetCertificates)
+ * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetOutputDirectory) & [SetOutput](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetOutput).
+ * Cookies for your request and CookieJar support
+ * SRV Record based request instead of Host URL
+ * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetAllowGetMethodPayload)
+ * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
+ * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
+ * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
+ * Resty design
+ * Have client level settings & options and also override at Request level if you want to
+ * Request and Response middleware
+ * Create Multiple clients if you want to `resty.New()`
+ * Supports `http.RoundTripper` implementation, see [SetTransport](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetTransport)
+ * goroutine concurrent safe
+ * Resty Client trace, see [Client.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.EnableTrace) and [Request.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.EnableTrace)
+ * Since v2.4.0, trace info contains a `RequestAttempt` value, and the `Request` object contains an `Attempt` attribute
+ * Debug mode - clean and informative logging presentation
+ * Gzip - Go does it automatically also resty has fallback handling too
+ * Works fine with `HTTP/2` and `HTTP/1.1`
+ * [Bazel support](#bazel-support)
+ * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
+ * Well tested client library
+
+### Included Batteries
+
+ * Redirect Policies - see [how to use](#redirect-policy)
+ * NoRedirectPolicy
+ * FlexibleRedirectPolicy
+ * DomainCheckRedirectPolicy
+ * etc. [more info](redirect.go)
+ * Retry Mechanism [how to use](#retries)
+ * Backoff Retry
+ * Conditional Retry
+ * Since v2.6.0, Retry Hooks - [Client](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.AddRetryHook), [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.AddRetryHook)
+ * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
+ * etc (upcoming - throw your idea's [here](https://github.com/go-resty/resty/issues)).
+
+
+#### Supported Go Versions
+
+Initially Resty started supporting `go modules` since `v1.10.0` release.
+
+Starting Resty v2 and higher versions, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) package release. It requires a Go version capable of understanding `/vN` suffixed imports:
+
+- 1.9.7+
+- 1.10.3+
+- 1.11+
+
+
+## It might be beneficial for your project :smile:
+
+Resty author also published following projects for Go Community.
+
+ * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
+ * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
+ * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
+
+
+## Installation
+
+```bash
+# Go Modules
+require github.com/go-resty/resty/v2 v2.7.0
+```
+
+## Usage
+
+The following samples will assist you to become as comfortable as possible with resty library.
+
+```go
+// Import resty into your code and refer it as `resty`.
+import "github.com/go-resty/resty/v2"
+```
+
+#### Simple GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ EnableTrace().
+ Get("https://httpbin.org/get")
+
+// Explore response object
+fmt.Println("Response Info:")
+fmt.Println(" Error :", err)
+fmt.Println(" Status Code:", resp.StatusCode())
+fmt.Println(" Status :", resp.Status())
+fmt.Println(" Proto :", resp.Proto())
+fmt.Println(" Time :", resp.Time())
+fmt.Println(" Received At:", resp.ReceivedAt())
+fmt.Println(" Body :\n", resp)
+fmt.Println()
+
+// Explore trace info
+fmt.Println("Request Trace Info:")
+ti := resp.Request.TraceInfo()
+fmt.Println(" DNSLookup :", ti.DNSLookup)
+fmt.Println(" ConnTime :", ti.ConnTime)
+fmt.Println(" TCPConnTime :", ti.TCPConnTime)
+fmt.Println(" TLSHandshake :", ti.TLSHandshake)
+fmt.Println(" ServerTime :", ti.ServerTime)
+fmt.Println(" ResponseTime :", ti.ResponseTime)
+fmt.Println(" TotalTime :", ti.TotalTime)
+fmt.Println(" IsConnReused :", ti.IsConnReused)
+fmt.Println(" IsConnWasIdle :", ti.IsConnWasIdle)
+fmt.Println(" ConnIdleTime :", ti.ConnIdleTime)
+fmt.Println(" RequestAttempt:", ti.RequestAttempt)
+fmt.Println(" RemoteAddr :", ti.RemoteAddr.String())
+
+/* Output
+Response Info:
+ Error :
+ Status Code: 200
+ Status : 200 OK
+ Proto : HTTP/2.0
+ Time : 457.034718ms
+ Received At: 2020-09-14 15:35:29.784681 -0700 PDT m=+0.458137045
+ Body :
+ {
+ "args": {},
+ "headers": {
+ "Accept-Encoding": "gzip",
+ "Host": "httpbin.org",
+ "User-Agent": "go-resty/2.4.0 (https://github.com/go-resty/resty)",
+ "X-Amzn-Trace-Id": "Root=1-5f5ff031-000ff6292204aa6898e4de49"
+ },
+ "origin": "0.0.0.0",
+ "url": "https://httpbin.org/get"
+ }
+
+Request Trace Info:
+ DNSLookup : 4.074657ms
+ ConnTime : 381.709936ms
+ TCPConnTime : 77.428048ms
+ TLSHandshake : 299.623597ms
+ ServerTime : 75.414703ms
+ ResponseTime : 79.337µs
+ TotalTime : 457.034718ms
+ IsConnReused : false
+ IsConnWasIdle : false
+ ConnIdleTime : 0s
+ RequestAttempt: 1
+ RemoteAddr : 3.221.81.55:443
+*/
+```
+
+#### Enhanced GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetQueryParams(map[string]string{
+ "page_no": "1",
+ "limit": "20",
+ "sort":"name",
+ "order": "asc",
+ "random":strconv.FormatInt(time.Now().Unix(), 10),
+ }).
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/search_result")
+
+
+// Sample of using Request.SetQueryString method
+resp, err := client.R().
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/show_product")
+
+
+// If necessary, you can force response content type to tell Resty to parse a JSON response into your struct
+resp, err := client.R().
+ SetResult(result).
+ ForceContentType("application/json").
+ Get("v2/alpine/manifests/latest")
+```
+
+#### Various POST method combinations
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// POST JSON string
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{"username":"testuser", "password":"testpass"}`).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST []byte array
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST Struct, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(User{Username: "testuser", Password: "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST Map, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST of raw bytes for file upload. For example: upload file to Dropbox
+fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf")
+
+// See we are not setting content-type header, since go-resty automatically detects Content-Type for you
+resp, err := client.R().
+ SetBody(fileBytes).
+ SetContentLength(true). // Dropbox expects this value
+ SetAuthToken("").
+ SetError(&DropboxError{}). // or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data type defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+
+You can use various combinations of `PUT` method call like demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer POST for more combination
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+
+You can use various combinations of `PATCH` method call like demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer POST for more combination
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// DELETE a article
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE a articles with payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+#### Override JSON & XML Marshal/Unmarshal
+
+User could register choice of JSON/XML library into resty or write your own. By default resty registers standard `encoding/json` and `encoding/xml` respectively.
+```go
+// Example of registering json-iterator
+import jsoniter "github.com/json-iterator/go"
+
+json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+client := resty.New()
+client.JSONMarshal = json.Marshal
+client.JSONUnmarshal = json.Unmarshal
+
+// similarly user could do for XML too with -
+client.XMLMarshal
+client.XMLUnmarshal
+```
+
+### Multipart File(s) upload
+
+#### Using io.Reader
+
+```go
+profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
+
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Single file scenario
+resp, err := client.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// just mentioning about POST as an example with simple flow
+// User Login
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := client.R().
+ SetFormDataFromValues(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting output directory path, If directory not exists then resty creates one!
+// This is optional one, if you're planning using absoule path in
+// `Request.SetOutput` and can used together.
+client.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := client.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: output directory path is not used for absolute path
+_, err := client.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request URL Path Params
+
+Resty provides easy to use dynamic request URL path params. Params can be set at client and request level. Client level params value can be overridden at request level.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.R().SetPathParams(map[string]string{
+ "userId": "sample@sample.com",
+ "subAccountId": "100002",
+}).
+Get("/v1/users/{userId}/{subAccountId}/details")
+
+// Result:
+// Composed URL - /v1/users/sample@sample.com/100002/details
+```
+
+#### Request and Response Middleware
+
+Resty provides middleware ability to manipulate for Request and Response. It is more flexible than callback approach.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Registering Request Middleware
+client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+
+// Registering Response Middleware
+client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+```
+
+#### OnError Hooks
+
+Resty provides OnError hooks that may be called because:
+
+- The client failed to send the request due to connection timeout, TLS handshake failure, etc...
+- The request was retried the maximum amount of times, and still failed.
+
+If there was a response from the server, the original error will be wrapped in `*resty.ResponseError` which contains the last response received.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.OnError(func(req *resty.Request, err error) {
+ if v, ok := err.(*resty.ResponseError); ok {
+ // v.Response contains the last response from the server
+ // v.Err contains the original error
+ }
+ // Log the error, increment a metric, etc...
+})
+```
+
+#### Redirect Policy
+
+Resty provides few ready to use redirect policy(s) also it supports multiple policies together.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Assign Client Redirect Policy. Create one as per you need
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Wanna multiple policies such as redirect count, domain name check, etc
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+
+Implement [RedirectPolicy](redirect.go#L20) interface and register it with resty client. Have a look [redirect.go](redirect.go) for more information.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Using raw func into resty.SetRedirectPolicy
+client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using struct create more flexible redirect policy
+type CustomRedirectPolicy struct {
+ // variables goes here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}
+
+// Registering in resty
+client.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Custom Root certificates, just supply .pem file.
+// you can add one or more root certificates; they get appended
+client.SetRootCertificate("/path/to/root/pemFile1.pem")
+client.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Custom Root Certificates and Client Certificates from string
+
+```go
+// Custom Root certificates from string
+// You can pass your certificates through env variables as strings
+// you can add one or more root certificates; they get appended
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.X509KeyPair([]byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"), []byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"))
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+
+By default, `Go` supports proxies via the environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings are applied to all the requests
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting a Proxy URL and Port
+client.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+client.RemoveProxy()
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Retries are configured per client
+client.
+ // Set retry count to non zero to enable retries
+ SetRetryCount(3).
+ // You can override initial retry wait time.
+ // Default is 100 milliseconds.
+ SetRetryWaitTime(5 * time.Second).
+ // MaxWaitTime can be overridden as well.
+ // Default is 2 seconds.
+ SetRetryMaxWaitTime(20 * time.Second).
+ // SetRetryAfter sets callback to calculate wait time between retries.
+ // Default (nil) implies exponential backoff with jitter
+ SetRetryAfter(func(client *resty.Client, resp *resty.Response) (time.Duration, error) {
+ return 0, errors.New("quota exceeded")
+ })
+```
+
+The above setup will result in resty retrying requests that returned a non-nil error up to
+3 times, with the delay increased after each attempt.
+
+You can optionally provide client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ func(r *resty.Response, err error) bool {
+ return r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+The above example will make resty retry requests that ended with the `429 Too Many Requests`
+status code.
+
+Multiple retry conditions can be added.
+
+It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
+implemented. [Reference](retry_test.go).
+
+#### Allow GET request with Payload
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Allow GET request with Payload. This is disabled by default.
+client.SetAllowGetMethodPayload(true)
+```
+
+#### Wanna Multiple Clients
+
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & its Options
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+client.SetDebug(true)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+client.SetTimeout(1 * time.Minute)
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all request. So you can use relative URL in the request
+client.SetHostURL("http://httpbin.org")
+
+// Headers for all request
+client.SetHeader("Accept", "application/json")
+client.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "User-Agent": "My custom User Agent String",
+ })
+
+// Cookies for all request
+client.SetCookie(&http.Cookie{
+ Name:"go-resty",
+ Value:"This is cookie value",
+ Path: "/",
+ Domain: "sample.com",
+ MaxAge: 36000,
+ HttpOnly: true,
+ Secure: false,
+ })
+client.SetCookies(cookies)
+
+// URL query parameters for all request
+client.SetQueryParam("user_id", "00001")
+client.SetQueryParams(map[string]string{ // sample of those who use this manner
+ "api_key": "api-key-here",
+ "api_secert": "api-secert",
+ })
+client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all request. Typically used with POST and PUT
+client.SetFormData(map[string]string{
+ "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+ })
+
+// Basic Auth for all request
+client.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all request
+client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling Content length value for all request
+client.SetContentLength(true)
+
+// Registering global Error object structure for JSON/XML request
+client.SetError(&Error{}) // or resty.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create a Go's http.Transport so we can set it in resty.
+transport := http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", unixSocket)
+ },
+}
+
+// Create a Resty Client
+client := resty.New()
+
+// Set the previous transport that we created, set the scheme of the communication to the
+// socket and set the unixSocket as the HostURL.
+client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+client.R().Get("/index.html")
+```
+
+#### Bazel Support
+
+Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
+For example, to run all tests:
+
+```shell
+bazel test :resty_test
+```
+
+#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
+
+In order to mock the http requests when testing your application you
+could use the `httpmock` library.
+
+When using the default resty client, you should pass the client to the library as follow:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Get the underlying HTTP Client and set it to Mock
+httpmock.ActivateNonDefault(client.GetClient())
+```
+
+A more detailed example of mocking resty http requests using ginkgo can be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
+
+## Versioning
+
+Resty releases versions according to [Semantic Versioning](http://semver.org)
+
+ * Resty v2 does not use `gopkg.in` service for library versioning.
+ * Resty fully adapted to `go mod` capabilities since `v1.10.0` release.
+ * Resty v1 series was using `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to appropriate tagged versions; `X` denotes version series number and it's a stable release for production use. For e.g. `gopkg.in/resty.v0`.
+ * Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break API's. I aim to maintain backwards compatibility, but sometimes API's and behavior might be changed to fix a bug.
+
+## Contribution
+
+I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request, I like pull requests that include test cases for fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests.
+
+BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Creator
+
+[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
+
+## Core Team
+
+Have a look on [Members](https://github.com/orgs/go-resty/people) page.
+
+## Contributors
+
+Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+
+Resty released under MIT license, refer [LICENSE](LICENSE) file.
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/WORKSPACE b/test/performance/vendor/github.com/go-resty/resty/v2/WORKSPACE
new file mode 100644
index 000000000..9ef03e95a
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/WORKSPACE
@@ -0,0 +1,31 @@
+workspace(name = "resty")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ ],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(version = "1.16")
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/client.go b/test/performance/vendor/github.com/go-resty/resty/v2/client.go
new file mode 100644
index 000000000..1a03efa37
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/client.go
@@ -0,0 +1,1115 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // MethodGet HTTP method
+ MethodGet = "GET"
+
+ // MethodPost HTTP method
+ MethodPost = "POST"
+
+ // MethodPut HTTP method
+ MethodPut = "PUT"
+
+ // MethodDelete HTTP method
+ MethodDelete = "DELETE"
+
+ // MethodPatch HTTP method
+ MethodPatch = "PATCH"
+
+ // MethodHead HTTP method
+ MethodHead = "HEAD"
+
+ // MethodOptions HTTP method
+ MethodOptions = "OPTIONS"
+)
+
+var (
+ hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
+ hdrAcceptKey = http.CanonicalHeaderKey("Accept")
+ hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
+ hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+ hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
+ hdrLocationKey = http.CanonicalHeaderKey("Location")
+
+ plainTextType = "text/plain; charset=utf-8"
+ jsonContentType = "application/json"
+ formContentType = "application/x-www-form-urlencoded"
+
+ jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`)
+ xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`)
+
+ hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
+ bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+type (
+ // RequestMiddleware type is for request middleware, called before a request is sent
+ RequestMiddleware func(*Client, *Request) error
+
+ // ResponseMiddleware type is for response middleware, called after a response has been received
+ ResponseMiddleware func(*Client, *Response) error
+
+ // PreRequestHook type is for the request hook, called right before the request is sent
+ PreRequestHook func(*Client, *http.Request) error
+
+ // RequestLogCallback type is for request logs, called before the request is logged
+ RequestLogCallback func(*RequestLog) error
+
+ // ResponseLogCallback type is for response logs, called before the response is logged
+ ResponseLogCallback func(*ResponseLog) error
+
+ // ErrorHook type is for reacting to request errors, called after all retries were attempted
+ ErrorHook func(*Request, error)
+)
+
+// Client struct is used to create Resty client with client level settings,
+// these settings are applicable to all the request raised from the client.
+//
+// Resty also provides an options to override most of the client settings
+// at request level.
+type Client struct {
+ BaseURL string
+ HostURL string // Deprecated: use BaseURL instead. To be removed in v3.0.0 release.
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ UserInfo *User
+ Token string
+ AuthScheme string
+ Cookies []*http.Cookie
+ Error reflect.Type
+ Debug bool
+ DisableWarn bool
+ AllowGetMethodPayload bool
+ RetryCount int
+ RetryWaitTime time.Duration
+ RetryMaxWaitTime time.Duration
+ RetryConditions []RetryConditionFunc
+ RetryHooks []OnRetryFunc
+ RetryAfter RetryAfterFunc
+ JSONMarshal func(v interface{}) ([]byte, error)
+ JSONUnmarshal func(data []byte, v interface{}) error
+ XMLMarshal func(v interface{}) ([]byte, error)
+ XMLUnmarshal func(data []byte, v interface{}) error
+
+ // HeaderAuthorizationKey is used to set/access Request Authorization header
+ // value when `SetAuthToken` option is used.
+ HeaderAuthorizationKey string
+
+ jsonEscapeHTML bool
+ setContentLength bool
+ closeConnection bool
+ notParseResponse bool
+ trace bool
+ debugBodySizeLimit int64
+ outputDirectory string
+ scheme string
+ log Logger
+ httpClient *http.Client
+ proxyURL *url.URL
+ beforeRequest []RequestMiddleware
+ udBeforeRequest []RequestMiddleware
+ preReqHook PreRequestHook
+ afterResponse []ResponseMiddleware
+ requestLog RequestLogCallback
+ responseLog ResponseLogCallback
+ errorHooks []ErrorHook
+}
+
+// User type is to hold an username and password information
+type User struct {
+ Username, Password string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client methods
+//___________________________________
+
+// SetHostURL method is to set Host URL in the client instance. It will be used with request
+// raised from this client with relative URL
+// // Setting HTTP address
+// client.SetHostURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetHostURL("https://myjeeva.com")
+//
+// Deprecated: use SetBaseURL instead. To be removed in v3.0.0 release.
+func (c *Client) SetHostURL(url string) *Client {
+ c.SetBaseURL(url)
+ return c
+}
+
+// SetBaseURL method is to set Base URL in the client instance. It will be used with request
+// raised from this client with relative URL
+// // Setting HTTP address
+// client.SetBaseURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetBaseURL("https://myjeeva.com")
+//
+// Since v2.7.0
+func (c *Client) SetBaseURL(url string) *Client {
+ c.BaseURL = strings.TrimRight(url, "/")
+ c.HostURL = c.BaseURL
+ return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// Also it can be overridden at request level header options.
+//
+// See `Request.SetHeader` or `Request.SetHeaders`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+func (c *Client) SetHeader(header, value string) *Client {
+ c.Header.Set(header, value)
+ return c
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. Also it can be
+// overridden at request level headers options.
+//
+// See `Request.SetHeaders` or `Request.SetHeader`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+ for h, v := range headers {
+ c.Header.Set(h, v)
+ }
+ return c
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (c *Client) SetHeaderVerbatim(header, value string) *Client {
+ c.Header[header] = []string{value}
+ return c
+}
+
+// SetCookieJar method sets custom http.CookieJar in the resty client. Its way to override default.
+//
+// For Example: sometimes we don't want to save cookies in api contacting, we can remove the default
+// CookieJar in resty client.
+//
+// client.SetCookieJar(nil)
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+ c.httpClient.Jar = jar
+ return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// client.SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, hc)
+ return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty
+// client.SetCookies(cookies)
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, cs...)
+ return c
+}
+
+// SetQueryParam method sets single parameter and its value in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from
+// this client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParam` or `Request.SetQueryParams`.
+// client.
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+func (c *Client) SetQueryParam(param, value string) *Client {
+ c.QueryParam.Set(param, value)
+ return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from this
+// client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParams` or `Request.SetQueryParam`.
+// client.SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetQueryParam(p, v)
+ }
+ return c
+}
+
+// SetFormData method sets Form parameters and their values in the client instance.
+// It's applicable only HTTP method `POST` and `PUT` and requets content type would be set as
+// `application/x-www-form-urlencoded`. These form data will be added to all the request raised from
+// this client instance. Also it can be overridden at request level form data.
+//
+// See `Request.SetFormData`.
+// client.SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+func (c *Client) SetFormData(data map[string]string) *Client {
+ for k, v := range data {
+ c.FormData.Set(k, v)
+ }
+ return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
+// Authorization: Basic
+//
+// For Example: To set the header for username "go-resty" and password "welcome"
+// client.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all the request rasied from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// See `Request.SetBasicAuth`.
+func (c *Client) SetBasicAuth(username, password string) *Client {
+ c.UserInfo = &User{Username: username, Password: password}
+ return c
+}
+
+// SetAuthToken method sets the auth token of the `Authorization` header for all HTTP requests.
+// The default auth scheme is `Bearer`, it can be customized with the method `SetAuthScheme`. For Example:
+// Authorization:
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This auth token gets added to all the requests rasied from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthToken(token string) *Client {
+ c.Token = token
+ return c
+}
+
+// SetAuthScheme method sets the auth scheme type in the HTTP request. For Example:
+// Authorization:
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.SetAuthScheme("OAuth")
+//
+// This auth scheme gets added to all the requests rasied from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// Information about auth schemes can be found in RFC7235 which is linked to below
+// along with the page containing the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthScheme(scheme string) *Client {
+ c.AuthScheme = scheme
+ return c
+}
+
+// R method creates a new request instance, its used for Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) R() *Request {
+ r := &Request{
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+
+ client: c,
+ multipartFiles: []*File{},
+ multipartFields: []*MultipartField{},
+ PathParams: map[string]string{},
+ jsonEscapeHTML: true,
+ }
+ return r
+}
+
+// NewRequest is an alias for method `R()`. Creates a new request instance, its used for
+// Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) NewRequest() *Request {
+ return c.R()
+}
+
+// OnBeforeRequest method appends request middleware into the before request chain.
+// Its gets applied after default Resty request middlewares and before request
+// been sent from Resty to host server.
+// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+// // Now you have access to Client and Request instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client {
+ c.udBeforeRequest = append(c.udBeforeRequest, m)
+ return c
+}
+
+// OnAfterResponse method appends response middleware into the after response chain.
+// Once we receive response from host server, default Resty response middleware
+// gets applied and then user assigened response middlewares applied.
+// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+// // Now you have access to Client and Response instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client {
+ c.afterResponse = append(c.afterResponse, m)
+ return c
+}
+
+// OnError method adds a callback that will be run whenever a request execution fails.
+// This is called after all retries have been attempted (if any).
+// If there was a response from the server, the error will be wrapped in *ResponseError
+// which has the last response received from the server.
+//
+// client.OnError(func(req *resty.Request, err error) {
+// if v, ok := err.(*resty.ResponseError); ok {
+// // Do something with v.Response
+// }
+// // Log the error, increment a metric, etc...
+// })
+func (c *Client) OnError(h ErrorHook) *Client {
+ c.errorHooks = append(c.errorHooks, h)
+ return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for mutilple.
+func (c *Client) SetPreRequestHook(h PreRequestHook) *Client {
+ if c.preReqHook != nil {
+ c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
+ }
+ c.preReqHook = h
+ return c
+}
+
+// SetDebug method enables the debug mode on Resty client. Client logs details of every request and response.
+// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
+// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
+// client.SetDebug(true)
+func (c *Client) SetDebug(d bool) *Client {
+ c.Debug = d
+ return c
+}
+
+// SetDebugBodyLimit sets the maximum size for which the response and request body will be logged in debug mode.
+// client.SetDebugBodyLimit(1000000)
+func (c *Client) SetDebugBodyLimit(sl int64) *Client {
+ c.debugBodySizeLimit = sl
+ return c
+}
+
+// OnRequestLog method used to set request log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnRequestLog(rl RequestLogCallback) *Client {
+ if c.requestLog != nil {
+ c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
+ functionName(c.requestLog), functionName(rl))
+ }
+ c.requestLog = rl
+ return c
+}
+
+// OnResponseLog method used to set response log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnResponseLog(rl ResponseLogCallback) *Client {
+ if c.responseLog != nil {
+ c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
+ functionName(c.responseLog), functionName(rl))
+ }
+ c.responseLog = rl
+ return c
+}
+
+// SetDisableWarn method disables the warning message on Resty client.
+//
+// For Example: Resty warns the user when BasicAuth used on non-TLS mode.
+// client.SetDisableWarn(true)
+func (c *Client) SetDisableWarn(d bool) *Client {
+ c.DisableWarn = d
+ return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with payload on Resty client.
+//
+// For Example: Resty allows the user sends request with a payload on HTTP GET method.
+// client.SetAllowGetMethodPayload(true)
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+ c.AllowGetMethodPayload = a
+ return c
+}
+
+// SetLogger method sets given writer for logging Resty request and response details.
+//
+// Compliant to interface `resty.Logger`.
+func (c *Client) SetLogger(l Logger) *Client {
+ c.log = l
+ return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default Resty won't set `Content-Length`.
+// client.SetContentLength(true)
+//
+// Also you have an option to enable for particular request. See `Request.SetContentLength`
+func (c *Client) SetContentLength(l bool) *Client {
+ c.setContentLength = l
+ return c
+}
+
+// SetTimeout method sets timeout for request raised from client.
+// client.SetTimeout(time.Duration(1 * time.Minute))
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+ c.httpClient.Timeout = timeout
+ return c
+}
+
+// SetError method is to register the global or client common `Error` object into Resty.
+// It is used for automatic unmarshalling if response status code is greater than 399 and
+// content type either JSON or XML. Can be pointer or non-pointer.
+// client.SetError(&Error{})
+// // OR
+// client.SetError(Error{})
+func (c *Client) SetError(err interface{}) *Client {
+ c.Error = typeOf(err)
+ return c
+}
+
+// SetRedirectPolicy method sets the client redirect poilicy. Resty provides ready to use
+// redirect policies. Wanna create one for yourself refer to `redirect.go`.
+//
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+// // Need multiple redirect policies together
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+ for _, p := range policies {
+ if _, ok := p.(RedirectPolicy); !ok {
+ c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
+ functionName(p))
+ }
+ }
+
+ c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ for _, p := range policies {
+ if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+ return err
+ }
+ }
+ return nil // looks good, go ahead
+ }
+
+ return c
+}
+
+// SetRetryCount method enables retry on Resty client and allows you
+// to set no. of retry count. Resty uses a Backoff mechanism.
+func (c *Client) SetRetryCount(count int) *Client {
+ c.RetryCount = count
+ return c
+}
+
+// SetRetryWaitTime method sets default wait time to sleep before retrying
+// request.
+//
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+ c.RetryWaitTime = waitTime
+ return c
+}
+
+// SetRetryMaxWaitTime method sets max wait time to sleep before retrying
+// request.
+//
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+ c.RetryMaxWaitTime = maxWaitTime
+ return c
+}
+
+// SetRetryAfter sets callback to calculate wait time between retries.
+// Default (nil) implies exponential backoff with jitter
+func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
+ c.RetryAfter = callback
+ return c
+}
+
+// AddRetryCondition method adds a retry condition function to array of functions
+// that are checked to determine if the request is retried. The request will
+// retry if any of the functions return true and error is nil.
+//
+// Note: These retry conditions are applied on all Request made using this Client.
+// For Request specific retry conditions check *Request.AddRetryCondition
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+ c.RetryConditions = append(c.RetryConditions, condition)
+ return c
+}
+
+// AddRetryAfterErrorCondition adds the basic condition of retrying after encountering
+// an error from the http response
+//
+// Since v2.6.0
+func (c *Client) AddRetryAfterErrorCondition() *Client {
+ c.AddRetryCondition(func(response *Response, err error) bool {
+ return response.IsError()
+ })
+ return c
+}
+
+// AddRetryHook adds a side-effecting retry hook to an array of hooks
+// that will be executed on each retry.
+//
+// Since v2.6.0
+func (c *Client) AddRetryHook(hook OnRetryFunc) *Client {
+ c.RetryHooks = append(c.RetryHooks, hook)
+ return c
+}
+
+// SetTLSClientConfig method sets TLSClientConfig for underling client Transport.
+//
+// For Example:
+// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+// client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+// // or One can disable security check (https)
+// client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+//
+// Note: This method overwrites existing `TLSClientConfig`.
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ transport.TLSClientConfig = config
+ return c
+}
+
+// SetProxy method sets the Proxy URL and Port for Resty client.
+// client.SetProxy("http://proxyserver:8888")
+//
+// OR Without this `SetProxy` method, you could also set Proxy via environment variable.
+//
+// Refer to godoc `http.ProxyFromEnvironment`.
+func (c *Client) SetProxy(proxyURL string) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ pURL, err := url.Parse(proxyURL)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ c.proxyURL = pURL
+ transport.Proxy = http.ProxyURL(c.proxyURL)
+ return c
+}
+
+// RemoveProxy method removes the proxy configuration from Resty client
+// client.RemoveProxy()
+func (c *Client) RemoveProxy() *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ c.proxyURL = nil
+ transport.Proxy = nil
+ return c
+}
+
+// SetCertificates method helps to set client certificates into Resty conveniently.
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ config.Certificates = append(config.Certificates, certs...)
+ return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into Resty client
+// client.SetRootCertificate("/path/to/root/pemFile.pem")
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+ rootPemData, err := ioutil.ReadFile(pemFilePath)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM(rootPemData)
+ return c
+}
+
+// SetRootCertificateFromString method helps to add one or more root certificates into Resty client
+// client.SetRootCertificateFromString("pem file content")
+func (c *Client) SetRootCertificateFromString(pemContent string) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM([]byte(pemContent))
+ return c
+}
+
+// SetOutputDirectory method sets output directory for saving HTTP response into file.
+// If the output directory not exists then resty creates one. This setting is optional one,
+// if you're planning using absolute path in `Request.SetOutput` and can used together.
+// client.SetOutputDirectory("/save/http/response/here")
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+ c.outputDirectory = dirPath
+ return c
+}
+
+// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper`
+// compatible interface implementation in the resty client.
+//
+// Note:
+//
+// - If transport is not type of `*http.Transport` then you may not be able to
+// take advantage of some of the Resty client settings.
+//
+// - It overwrites the Resty client transport instance and it's configurations.
+//
+// transport := &http.Transport{
+// // somthing like Proxying to httptest.Server, etc...
+// Proxy: func(req *http.Request) (*url.URL, error) {
+// return url.Parse(server.URL)
+// },
+// }
+//
+// client.SetTransport(transport)
+func (c *Client) SetTransport(transport http.RoundTripper) *Client {
+ if transport != nil {
+ c.httpClient.Transport = transport
+ }
+ return c
+}
+
+// SetScheme method sets custom scheme in the Resty client. It's way to override default.
+// client.SetScheme("http")
+func (c *Client) SetScheme(scheme string) *Client {
+ if !IsStringEmpty(scheme) {
+ c.scheme = strings.TrimSpace(scheme)
+ }
+ return c
+}
+
+// SetCloseConnection method sets variable `Close` in http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+ c.closeConnection = close
+ return c
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
+// otherwise you might get into connection leaks, no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (c *Client) SetDoNotParseResponse(parse bool) *Client {
+ c.notParseResponse = parse
+ return c
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty client instance.
+// client.SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL.
+//
+// Also it can be overridden at request level Path Params options,
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParam(param, value string) *Client {
+ c.PathParams[param] = value
+ return c
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+// client.SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL.
+//
+// Also it can be overridden at request level Path Params options,
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetPathParam(p, v)
+ }
+ return c
+}
+
+// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
+//
+// Note: This option only applicable to standard JSON Marshaller.
+func (c *Client) SetJSONEscapeHTML(b bool) *Client {
+ c.jsonEscapeHTML = b
+ return c
+}
+
+// EnableTrace method enables the Resty client trace for the requests fired from
+// the client using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New().EnableTrace()
+//
+// resp, err := client.R().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// Also `Request.EnableTrace` available too to get trace info for single request.
+//
+// Since v2.0.0
+func (c *Client) EnableTrace() *Client {
+ c.trace = true
+ return c
+}
+
+// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
+//
+// Since v2.0.0
+func (c *Client) DisableTrace() *Client {
+ c.trace = false
+ return c
+}
+
+// IsProxySet method returns the true is proxy is set from resty client otherwise
+// false. By default proxy is set from environment, refer to `http.ProxyFromEnvironment`.
+func (c *Client) IsProxySet() bool {
+ return c.proxyURL != nil
+}
+
+// GetClient method returns the current `http.Client` used by the resty client.
+func (c *Client) GetClient() *http.Client {
+ return c.httpClient
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client Unexported methods
+//_______________________________________________________________________
+
+// Executes method executes the given `Request` object and returns response
+// error.
+func (c *Client) execute(req *Request) (*Response, error) {
+ // Apply Request middleware
+ var err error
+
+ // user defined on before request methods
+ // to modify the *resty.Request object
+ for _, f := range c.udBeforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ // resty middlewares
+ for _, f := range c.beforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if hostHeader := req.Header.Get("Host"); hostHeader != "" {
+ req.RawRequest.Host = hostHeader
+ }
+
+ // call pre-request if defined
+ if c.preReqHook != nil {
+ if err = c.preReqHook(c, req.RawRequest); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if err = requestLogger(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+
+ req.RawRequest.Body = newRequestBodyReleaser(req.RawRequest.Body, req.bodyBuf)
+
+ req.Time = time.Now()
+ resp, err := c.httpClient.Do(req.RawRequest)
+
+ response := &Response{
+ Request: req,
+ RawResponse: resp,
+ }
+
+ if err != nil || req.notParseResponse || c.notParseResponse {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ if !req.isSaveResponse {
+ defer closeq(resp.Body)
+ body := resp.Body
+
+ // GitHub #142 & #187
+ if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
+ if _, ok := body.(*gzip.Reader); !ok {
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+ defer closeq(body)
+ }
+ }
+
+ if response.body, err = ioutil.ReadAll(body); err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ response.size = int64(len(response.body))
+ }
+
+ response.setReceivedAt() // after we read the body
+
+ // Apply Response middleware
+ for _, f := range c.afterResponse {
+ if err = f(c, response); err != nil {
+ break
+ }
+ }
+
+ return response, wrapNoRetryErr(err)
+}
+
+// getting TLS client config if not exists then create one
+func (c *Client) tlsConfig() (*tls.Config, error) {
+ transport, err := c.transport()
+ if err != nil {
+ return nil, err
+ }
+ if transport.TLSClientConfig == nil {
+ transport.TLSClientConfig = &tls.Config{}
+ }
+ return transport.TLSClientConfig, nil
+}
+
+// Transport method returns `*http.Transport` currently in use or error
+// in case currently used `transport` is not a `*http.Transport`.
+func (c *Client) transport() (*http.Transport, error) {
+ if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
+ return transport, nil
+ }
+ return nil, errors.New("current transport is not an *http.Transport instance")
+}
+
+// just an internal helper method
+func (c *Client) outputLogTo(w io.Writer) *Client {
+ c.log.(*logger).l.SetOutput(w)
+ return c
+}
+
+// ResponseError is a wrapper for including the server response with an error.
+// Neither the err nor the response should be nil.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+func (e *ResponseError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *ResponseError) Unwrap() error {
+ return e.Err
+}
+
+// Helper to run onErrorHooks hooks.
+// It wraps the error in a ResponseError if the resp is not nil
+// so hooks can access it.
+func (c *Client) onErrorHooks(req *Request, resp *Response, err error) {
+ if err != nil {
+ if resp != nil { // wrap with ResponseError
+ err = &ResponseError{Response: resp, Err: err}
+ }
+ for _, h := range c.errorHooks {
+ h(req, err)
+ }
+ }
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// File struct and its methods
+//_______________________________________________________________________
+
+// File struct represent file information for multipart request
+type File struct {
+ Name string
+ ParamName string
+ io.Reader
+}
+
+// String returns string value of current file details
+func (f *File) String() string {
+ return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// MultipartField struct
+//_______________________________________________________________________
+
+// MultipartField struct represent custom data part for multipart request
+type MultipartField struct {
+ Param string
+ FileName string
+ ContentType string
+ io.Reader
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Unexported package methods
+//_______________________________________________________________________
+
+func createClient(hc *http.Client) *Client {
+ if hc.Transport == nil {
+ hc.Transport = createTransport(nil)
+ }
+
+ c := &Client{ // not setting lang default values
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+ RetryWaitTime: defaultWaitTime,
+ RetryMaxWaitTime: defaultMaxWaitTime,
+ PathParams: make(map[string]string),
+ JSONMarshal: json.Marshal,
+ JSONUnmarshal: json.Unmarshal,
+ XMLMarshal: xml.Marshal,
+ XMLUnmarshal: xml.Unmarshal,
+ HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
+
+ jsonEscapeHTML: true,
+ httpClient: hc,
+ debugBodySizeLimit: math.MaxInt32,
+ }
+
+ // Logger
+ c.SetLogger(createLogger())
+
+ // default before request middlewares
+ c.beforeRequest = []RequestMiddleware{
+ parseRequestURL,
+ parseRequestHeader,
+ parseRequestBody,
+ createHTTPRequest,
+ addCredentials,
+ }
+
+ // user defined request middlewares
+ c.udBeforeRequest = []RequestMiddleware{}
+
+ // default after response middlewares
+ c.afterResponse = []ResponseMiddleware{
+ responseLogger,
+ parseResponseBody,
+ saveResponseIntoFile,
+ }
+
+ return c
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/middleware.go b/test/performance/vendor/github.com/go-resty/resty/v2/middleware.go
new file mode 100644
index 000000000..0e8ac2b69
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -0,0 +1,543 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+)
+
+const debugRequestLogKey = "__restyDebugRequestLog"
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Middleware(s)
+//_______________________________________________________________________
+
+func parseRequestURL(c *Client, r *Request) error {
+ // GitHub #103 Path Params
+ if len(r.PathParams) > 0 {
+ for p, v := range r.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+ if len(c.PathParams) > 0 {
+ for p, v := range c.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+
+ // Parsing request URL
+ reqURL, err := url.Parse(r.URL)
+ if err != nil {
+ return err
+ }
+
+ // If Request.URL is relative path then added c.HostURL into
+ // the request URL otherwise Request.URL will be used as-is
+ if !reqURL.IsAbs() {
+ r.URL = reqURL.String()
+ if len(r.URL) > 0 && r.URL[0] != '/' {
+ r.URL = "/" + r.URL
+ }
+
+ reqURL, err = url.Parse(c.HostURL + r.URL)
+ if err != nil {
+ return err
+ }
+ }
+
+ // GH #407 && #318
+ if reqURL.Scheme == "" && len(c.scheme) > 0 {
+ reqURL.Scheme = c.scheme
+ }
+
+ // Adding Query Param
+ query := make(url.Values)
+ for k, v := range c.QueryParam {
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.QueryParam {
+ // remove query param from client level by key
+ // since overrides happens for that key in the request
+ query.Del(k)
+
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ // GitHub #123 Preserve query string order partially.
+ // Since not feasible in `SetQuery*` resty methods, because
+ // standard package `url.Encode(...)` sorts the query params
+ // alphabetically
+ if len(query) > 0 {
+ if IsStringEmpty(reqURL.RawQuery) {
+ reqURL.RawQuery = query.Encode()
+ } else {
+ reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
+ }
+ }
+
+ r.URL = reqURL.String()
+
+ return nil
+}
+
+func parseRequestHeader(c *Client, r *Request) error {
+ hdr := make(http.Header)
+ for k := range c.Header {
+ hdr[k] = append(hdr[k], c.Header[k]...)
+ }
+
+ for k := range r.Header {
+ hdr.Del(k)
+ hdr[k] = append(hdr[k], r.Header[k]...)
+ }
+
+ if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
+ hdr.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+
+ ct := hdr.Get(hdrContentTypeKey)
+ if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) &&
+ (IsJSONType(ct) || IsXMLType(ct)) {
+ hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
+ }
+
+ r.Header = hdr
+
+ return nil
+}
+
+func parseRequestBody(c *Client, r *Request) (err error) {
+ if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+ // Handling Multipart
+ if r.isMultiPart && !(r.Method == MethodPatch) {
+ if err = handleMultipart(c, r); err != nil {
+ return
+ }
+
+ goto CL
+ }
+
+ // Handling Form Data
+ if len(c.FormData) > 0 || len(r.FormData) > 0 {
+ handleFormData(c, r)
+
+ goto CL
+ }
+
+ // Handling Request body
+ if r.Body != nil {
+ handleContentType(c, r)
+
+ if err = handleRequestBody(c, r); err != nil {
+ return
+ }
+ }
+ }
+
+CL:
+ // by default resty won't set content length, you can if you want to :)
+ if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
+ r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
+ }
+
+ return
+}
+
+func createHTTPRequest(c *Client, r *Request) (err error) {
+ if r.bodyBuf == nil {
+ if reader, ok := r.Body.(io.Reader); ok {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
+ } else if c.setContentLength || r.setContentLength {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, http.NoBody)
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
+ }
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf)
+ }
+
+ if err != nil {
+ return
+ }
+
+ // Assign close connection option
+ r.RawRequest.Close = c.closeConnection
+
+ // Add headers into http request
+ r.RawRequest.Header = r.Header
+
+ // Add cookies from client instance into http request
+ for _, cookie := range c.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Add cookies from request instance into http request
+ for _, cookie := range r.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Enable trace
+ if c.trace || r.trace {
+ r.clientTrace = &clientTrace{}
+ r.ctx = r.clientTrace.createContext(r.Context())
+ }
+
+ // Use context if it was specified
+ if r.ctx != nil {
+ r.RawRequest = r.RawRequest.WithContext(r.ctx)
+ }
+
+ bodyCopy, err := getBodyCopy(r)
+ if err != nil {
+ return err
+ }
+
+ // assign get body func for the underlying raw request instance
+ r.RawRequest.GetBody = func() (io.ReadCloser, error) {
+ if bodyCopy != nil {
+ return ioutil.NopCloser(bytes.NewReader(bodyCopy.Bytes())), nil
+ }
+ return nil, nil
+ }
+
+ return
+}
+
+func addCredentials(c *Client, r *Request) error {
+ var isBasicAuth bool
+ // Basic Auth
+ if r.UserInfo != nil { // takes precedence
+ r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
+ isBasicAuth = true
+ } else if c.UserInfo != nil {
+ r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
+ isBasicAuth = true
+ }
+
+ if !c.DisableWarn {
+ if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
+ c.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
+ }
+ }
+
+ // Set the Authorization Header Scheme
+ var authScheme string
+ if !IsStringEmpty(r.AuthScheme) {
+ authScheme = r.AuthScheme
+ } else if !IsStringEmpty(c.AuthScheme) {
+ authScheme = c.AuthScheme
+ } else {
+ authScheme = "Bearer"
+ }
+
+ // Build the Token Auth header
+ if !IsStringEmpty(r.Token) { // takes precedence
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+r.Token)
+ } else if !IsStringEmpty(c.Token) {
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+c.Token)
+ }
+
+ return nil
+}
+
+func requestLogger(c *Client, r *Request) error {
+ if c.Debug {
+ rr := r.RawRequest
+ rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString(c.debugBodySizeLimit)}
+ if c.requestLog != nil {
+ if err := c.requestLog(rl); err != nil {
+ return err
+ }
+ }
+ // fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
+
+ reqLog := "\n==============================================================================\n" +
+ "~~~ REQUEST ~~~\n" +
+ fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
+ fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
+ fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
+ fmt.Sprintf("BODY :\n%v\n", rl.Body) +
+ "------------------------------------------------------------------------------\n"
+
+ r.initValuesMap()
+ r.values[debugRequestLogKey] = reqLog
+ }
+
+ return nil
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Middleware(s)
+//_______________________________________________________________________
+
+func responseLogger(c *Client, res *Response) error {
+ if c.Debug {
+ rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
+ if c.responseLog != nil {
+ if err := c.responseLog(rl); err != nil {
+ return err
+ }
+ }
+
+ debugLog := res.Request.values[debugRequestLogKey].(string)
+ debugLog += "~~~ RESPONSE ~~~\n" +
+ fmt.Sprintf("STATUS : %s\n", res.Status()) +
+ fmt.Sprintf("PROTO : %s\n", res.RawResponse.Proto) +
+ fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
+ fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
+ "HEADERS :\n" +
+ composeHeaders(c, res.Request, rl.Header) + "\n"
+ if res.Request.isSaveResponse {
+ debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
+ } else {
+ debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
+ }
+ debugLog += "==============================================================================\n"
+
+ c.log.Debugf("%s", debugLog)
+ }
+
+ return nil
+}
+
+func parseResponseBody(c *Client, res *Response) (err error) {
+ if res.StatusCode() == http.StatusNoContent {
+ return
+ }
+ // Handles only JSON or XML content type
+ ct := firstNonEmpty(res.Request.forceContentType, res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
+ if IsJSONType(ct) || IsXMLType(ct) {
+ // HTTP status code > 199 and < 300, considered as Result
+ if res.IsSuccess() {
+ res.Request.Error = nil
+ if res.Request.Result != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Result)
+ return
+ }
+ }
+
+ // HTTP status code > 399, considered as Error
+ if res.IsError() {
+ // global error interface
+ if res.Request.Error == nil && c.Error != nil {
+ res.Request.Error = reflect.New(c.Error).Interface()
+ }
+
+ if res.Request.Error != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Error)
+ }
+ }
+ }
+
+ return
+}
+
+func handleMultipart(c *Client, r *Request) (err error) {
+ r.bodyBuf = acquireBuffer()
+ w := multipart.NewWriter(r.bodyBuf)
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+
+ for k, v := range r.FormData {
+ for _, iv := range v {
+ if strings.HasPrefix(k, "@") { // file
+ err = addFile(w, k[1:], iv)
+ if err != nil {
+ return
+ }
+ } else { // form value
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // #21 - adding io.Reader support
+ if len(r.multipartFiles) > 0 {
+ for _, f := range r.multipartFiles {
+ err = addFileReader(w, f)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ // GitHub #130 adding multipart field support with content type
+ if len(r.multipartFields) > 0 {
+ for _, mf := range r.multipartFields {
+ if err = addMultipartFormField(w, mf); err != nil {
+ return
+ }
+ }
+ }
+
+ r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+ err = w.Close()
+
+ return
+}
+
+func handleFormData(c *Client, r *Request) {
+ formData := url.Values{}
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.FormData {
+ // remove form data field from client level by key
+ // since overrides happens for that key in the request
+ formData.Del(k)
+
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+ r.Header.Set(hdrContentTypeKey, formContentType)
+ r.isFormData = true
+}
+
+func handleContentType(c *Client, r *Request) {
+ contentType := r.Header.Get(hdrContentTypeKey)
+ if IsStringEmpty(contentType) {
+ contentType = DetectContentType(r.Body)
+ r.Header.Set(hdrContentTypeKey, contentType)
+ }
+}
+
+func handleRequestBody(c *Client, r *Request) (err error) {
+ var bodyBytes []byte
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ r.bodyBuf = nil
+
+ if reader, ok := r.Body.(io.Reader); ok {
+ if c.setContentLength || r.setContentLength { // keep backward compatibility
+ r.bodyBuf = acquireBuffer()
+ _, err = r.bodyBuf.ReadFrom(reader)
+ r.Body = nil
+ } else {
+ // Otherwise buffer less processing for `io.Reader`, sounds good.
+ return
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ bodyBytes = b
+ } else if s, ok := r.Body.(string); ok {
+ bodyBytes = []byte(s)
+ } else if IsJSONType(contentType) &&
+ (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+ r.bodyBuf, err = jsonMarshal(c, r, r.Body)
+ if err != nil {
+ return
+ }
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ bodyBytes, err = c.XMLMarshal(r.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ if bodyBytes == nil && r.bodyBuf == nil {
+ err = errors.New("unsupported 'Body' type/value")
+ }
+
+ // if any errors during body bytes handling, return it
+ if err != nil {
+ return
+ }
+
+ // []byte into Buffer
+ if bodyBytes != nil && r.bodyBuf == nil {
+ r.bodyBuf = acquireBuffer()
+ _, _ = r.bodyBuf.Write(bodyBytes)
+ }
+
+ return
+}
+
+func saveResponseIntoFile(c *Client, res *Response) error {
+ if res.Request.isSaveResponse {
+ file := ""
+
+ if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+ file += c.outputDirectory + string(filepath.Separator)
+ }
+
+ file = filepath.Clean(file + res.Request.outputFile)
+ if err := createDirectory(filepath.Dir(file)); err != nil {
+ return err
+ }
+
+ outFile, err := os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer closeq(outFile)
+
+ // io.Copy reads maximum 32kb size, it is perfect for large file download too
+ defer closeq(res.RawResponse.Body)
+
+ written, err := io.Copy(outFile, res.RawResponse.Body)
+ if err != nil {
+ return err
+ }
+
+ res.size = written
+ }
+
+ return nil
+}
+
+func getBodyCopy(r *Request) (*bytes.Buffer, error) {
+ // If r.bodyBuf present, return the copy
+ if r.bodyBuf != nil {
+ return bytes.NewBuffer(r.bodyBuf.Bytes()), nil
+ }
+
+ // Maybe body is `io.Reader`.
+ // Note: Resty user have to watchout for large body size of `io.Reader`
+ if r.RawRequest.Body != nil {
+ b, err := ioutil.ReadAll(r.RawRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Restore the Body
+ closeq(r.RawRequest.Body)
+ r.RawRequest.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+
+ // Return the Body bytes
+ return bytes.NewBuffer(b), nil
+ }
+ return nil, nil
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/redirect.go b/test/performance/vendor/github.com/go-resty/resty/v2/redirect.go
new file mode 100644
index 000000000..7d7e43bc1
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/redirect.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+type (
+ // RedirectPolicy to regulate the redirects in the resty client.
+ // Objects implementing the RedirectPolicy interface can be registered as
+ //
+ // Apply function should return nil to continue the redirect jounery, otherwise
+ // return error to stop the redirect.
+ RedirectPolicy interface {
+ Apply(req *http.Request, via []*http.Request) error
+ }
+
+ // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+ // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+ RedirectPolicyFunc func(*http.Request, []*http.Request) error
+)
+
+// Apply calls f(req, via).
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+ return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client
+// resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ return errors.New("auto redirect is disabled")
+ })
+}
+
+// FlexibleRedirectPolicy is convenient method to create No of redirect policy for HTTP client.
+// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if len(via) >= noOfRedirect {
+ return fmt.Errorf("stopped after %d redirects", noOfRedirect)
+ }
+ checkHostAndAddHeaders(req, via[0])
+ return nil
+ })
+}
+
+// DomainCheckRedirectPolicy is convenient method to define domain name redirect rule in resty client.
+// Redirect is allowed for only mentioned host in the policy.
+// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+ hosts := make(map[string]bool)
+ for _, h := range hostnames {
+ hosts[strings.ToLower(h)] = true
+ }
+
+ fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if ok := hosts[getHostname(req.URL.Host)]; !ok {
+ return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
+ }
+
+ return nil
+ })
+
+ return fn
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+func getHostname(host string) (hostname string) {
+ if strings.Index(host, ":") > 0 {
+ host, _, _ = net.SplitHostPort(host)
+ }
+ hostname = strings.ToLower(host)
+ return
+}
+
+// By default Golang will not redirect request headers
+// after go throughing various discussion comments from thread
+// https://github.com/golang/go/issues/4800
+// Resty will add all the headers during a redirect for the same host
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+ curHostname := getHostname(cur.URL.Host)
+ preHostname := getHostname(pre.URL.Host)
+ if strings.EqualFold(curHostname, preHostname) {
+ for key, val := range pre.Header {
+ cur.Header[key] = val
+ }
+ } else { // only library User-Agent header is added
+ cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/request.go b/test/performance/vendor/github.com/go-resty/resty/v2/request.go
new file mode 100644
index 000000000..672df88c3
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/request.go
@@ -0,0 +1,896 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request struct and methods
+//_______________________________________________________________________
+
+// Request struct is used to compose and fire individual request from
+// resty client. Request provides an options to override client level
+// settings and also an options for the request composition.
+type Request struct {
+ URL string
+ Method string
+ Token string
+ AuthScheme string
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ Time time.Time
+ Body interface{}
+ Result interface{}
+ Error interface{}
+ RawRequest *http.Request
+ SRV *SRVRecord
+ UserInfo *User
+ Cookies []*http.Cookie
+
+ // Attempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ //
+ // Since v2.4.0
+ Attempt int
+
+ isMultiPart bool
+ isFormData bool
+ setContentLength bool
+ isSaveResponse bool
+ notParseResponse bool
+ jsonEscapeHTML bool
+ trace bool
+ outputFile string
+ fallbackContentType string
+ forceContentType string
+ ctx context.Context
+ values map[string]interface{}
+ client *Client
+ bodyBuf *bytes.Buffer
+ clientTrace *clientTrace
+ multipartFiles []*File
+ multipartFields []*MultipartField
+ retryConditions []RetryConditionFunc
+}
+
+// Context method returns the Context if its already set in request
+// otherwise it creates new one using `context.Background()`.
+func (r *Request) Context() context.Context {
+ if r.ctx == nil {
+ return context.Background()
+ }
+ return r.ctx
+}
+
+// SetContext method sets the context.Context for current Request. It allows
+// to interrupt the request execution if ctx.Done() channel is closed.
+// See https://blog.golang.org/context article and the "context" package
+// documentation.
+func (r *Request) SetContext(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// SetHeader method is to set a single header field and its value in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`.
+// client.R().
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeader(header, value string) *Request {
+ r.Header.Set(header, value)
+ return r
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.R().
+// SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+ for h, v := range headers {
+ r.SetHeader(h, v)
+ }
+ return r
+}
+
+// SetHeaderMultiValues sets multiple headers fields and its values is list of strings at one go in the current request.
+//
+// For Example: To set `Accept` as `text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8`
+//
+// client.R().
+// SetHeaderMultiValues(map[string][]string{
+// "Accept": []string{"text/html", "application/xhtml+xml", "application/xml;q=0.9", "image/webp", "*/*;q=0.8"},
+// })
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaderMultiValues(headers map[string][]string) *Request {
+ for key, values := range headers {
+ r.SetHeader(key, strings.Join(values, ", "))
+ }
+ return r
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (r *Request) SetHeaderVerbatim(header, value string) *Request {
+ r.Header[header] = []string{value}
+ return r
+}
+
+// SetQueryParam method sets single parameter and its value in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParam(param, value string) *Request {
+ r.QueryParam.Set(param, value)
+ return r
+}
+
+// SetQueryParams method sets multiple parameters and its values at one go in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetQueryParam(p, v)
+ }
+ return r
+}
+
+// SetQueryParamsFromValues method appends multiple parameters with multi-value
+// (`url.Values`) at one go in the current request. It will be formed as
+// query string for the request.
+//
+// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
+// client.R().
+// SetQueryParamsFromValues(url.Values{
+// "status": []string{"pending", "approved", "open"},
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ return r
+}
+
+// SetQueryString method provides ability to use string as an input to set URL query string for the request.
+//
+// Using String as an input
+// client.R().
+// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+func (r *Request) SetQueryString(query string) *Request {
+ params, err := url.ParseQuery(strings.TrimSpace(query))
+ if err == nil {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ } else {
+ r.client.log.Errorf("%v", err)
+ }
+ return r
+}
+
+// SetFormData method sets Form parameters and their values in the current request.
+// It's applicable only HTTP method `POST` and `PUT` and requests content type would be set as
+// `application/x-www-form-urlencoded`.
+// client.R().
+// SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r.FormData.Set(k, v)
+ }
+ return r
+}
+
+// SetFormDataFromValues method appends multiple form parameters with multi-value
+// (`url.Values`) at one go in the current request.
+// client.R().
+// SetFormDataFromValues(url.Values{
+// "search_criteria": []string{"book", "glass", "pencil"},
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormDataFromValues(data url.Values) *Request {
+ for k, v := range data {
+ for _, kv := range v {
+ r.FormData.Add(k, kv)
+ }
+ }
+ return r
+}
+
+// SetBody method sets the request body for the request. It supports various realtime needs as easy.
+// We can say its quite handy or powerful. Supported request body data types is `string`,
+// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. Body value can be pointer or non-pointer.
+// Automatic marshalling for JSON and XML content type, if it is `struct`, `map`, or `slice`.
+//
+// Note: `io.Reader` is processed as bufferless mode while sending request.
+//
+// For Example: Struct as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(User{
+// Username: "jeeva@myjeeva.com",
+// Password: "welcome2resty",
+// })
+//
+// Map as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(map[string]interface{}{
+// "username": "jeeva@myjeeva.com",
+// "password": "welcome2resty",
+// "address": &Address{
+// Address1: "1111 This is my street",
+// Address2: "Apt 201",
+// City: "My City",
+// State: "My State",
+// ZipCode: 00000,
+// },
+// })
+//
+// String as a body input. Suitable for any need as a string input.
+// client.R().
+// SetBody(`{
+// "username": "jeeva@getrightcare.com",
+// "password": "admin"
+// }`)
+//
+// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc.
+// client.R().
+// SetBody([]byte("This is my raw request, sent as-is"))
+func (r *Request) SetBody(body interface{}) *Request {
+ r.Body = body
+ return r
+}
+
+// SetResult method is to register the response `Result` object for automatic unmarshalling for the request,
+// if response status code is between 200 and 299 and content type either JSON or XML.
+//
+// Note: Result object can be pointer or non-pointer.
+// client.R().SetResult(&AuthToken{})
+// // OR
+// client.R().SetResult(AuthToken{})
+//
+// Accessing a result value from response instance.
+// response.Result().(*AuthToken)
+func (r *Request) SetResult(res interface{}) *Request {
+ r.Result = getPointer(res)
+ return r
+}
+
+// SetError method is to register the request `Error` object for automatic unmarshalling for the request,
+// if response status code is greater than 399 and content type either JSON or XML.
+//
+// Note: Error object can be pointer or non-pointer.
+// client.R().SetError(&AuthError{})
+// // OR
+// client.R().SetError(AuthError{})
+//
+// Accessing a error value from response instance.
+// response.Error().(*AuthError)
+func (r *Request) SetError(err interface{}) *Request {
+ r.Error = getPointer(err)
+ return r
+}
+
+// SetFile method is to set single file field name and its path for multipart upload.
+// client.R().
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+func (r *Request) SetFile(param, filePath string) *Request {
+ r.isMultiPart = true
+ r.FormData.Set("@"+param, filePath)
+ return r
+}
+
+// SetFiles method is to set multiple file field name and its path for multipart upload.
+// client.R().
+// SetFiles(map[string]string{
+// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+// })
+func (r *Request) SetFiles(files map[string]string) *Request {
+ r.isMultiPart = true
+ for f, fp := range files {
+ r.FormData.Set("@"+f, fp)
+ }
+ return r
+}
+
+// SetFileReader method is to set single file using io.Reader for multipart upload.
+// client.R().
+// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFiles = append(r.multipartFiles, &File{
+ Name: fileName,
+ ParamName: param,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFormData method allows simple form data to be attached to the request as `multipart:form-data`
+func (r *Request) SetMultipartFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r = r.SetMultipartField(k, "", "", strings.NewReader(v))
+ }
+
+ return r
+}
+
+// SetMultipartField method is to set custom data using io.Reader for multipart upload.
+func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, &MultipartField{
+ Param: param,
+ FileName: fileName,
+ ContentType: contentType,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
+//
+// For Example:
+// client.R().SetMultipartFields(
+// &resty.MultipartField{
+// Param: "uploadManifest1",
+// FileName: "upload-file-1.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
+// },
+// &resty.MultipartField{
+// Param: "uploadManifest2",
+// FileName: "upload-file-2.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
+// })
+//
+// If you have slice already, then simply call-
+// client.R().SetMultipartFields(fields...)
+func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, fields...)
+ return r
+}
+
+// SetContentLength method sets the HTTP header `Content-Length` value for current request.
+// By default Resty won't set `Content-Length`. Also you have an option to enable for every
+// request.
+//
+// See `Client.SetContentLength`
+// client.R().SetContentLength(true)
+func (r *Request) SetContentLength(l bool) *Request {
+ r.setContentLength = l
+ return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+//
+// For Example:
+// Authorization: Basic
+//
+// To set the header for username "go-resty" and password "welcome"
+// client.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `Client.SetBasicAuth`.
+func (r *Request) SetBasicAuth(username, password string) *Request {
+ r.UserInfo = &User{Username: username, Password: password}
+ return r
+}
+
+// SetAuthToken method sets the auth token header(Default Scheme: Bearer) in the current HTTP request. Header example:
+// Authorization: Bearer
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `Client.SetAuthToken`.
+func (r *Request) SetAuthToken(token string) *Request {
+ r.Token = token
+ return r
+}
+
+// SetAuthScheme method sets the auth token scheme type in the HTTP request. For Example:
+// Authorization:
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.R().SetAuthScheme("OAuth")
+//
+// This auth header scheme gets added to all the request rasied from this client instance.
+// Also it can be overridden or set one at the request level is supported.
+//
+// Information about Auth schemes can be found in RFC7235 which is linked to below along with the page containing
+// the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// This method overrides the Authorization scheme set by method `Client.SetAuthScheme`.
+func (r *Request) SetAuthScheme(scheme string) *Request {
+ r.AuthScheme = scheme
+ return r
+}
+
+// SetOutput method sets the output file for current HTTP request. Current HTTP response will be
+// saved into given file. It is similar to `curl -o` flag. Absolute path or relative path can be used.
+// If is it relative path then output file goes under the output directory, as mentioned
+// in the `Client.SetOutputDirectory`.
+// client.R().
+// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+// Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+ r.outputFile = file
+ r.isSaveResponse = true
+ return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+// client.R().
+// SetSRV(SRVRecord{"web", "testservice.com"}).
+// Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+ r.SRV = srv
+ return r
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
+// otherwise you might get into connection leaks, no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (r *Request) SetDoNotParseResponse(parse bool) *Request {
+ r.notParseResponse = parse
+ return r
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty current request instance.
+// client.R().SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL. Also you can
+// override Path Params value, which was set at client instance level.
+func (r *Request) SetPathParam(param, value string) *Request {
+ r.PathParams[param] = value
+ return r
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty current request instance.
+// client.R().SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing request URL. Also you can
+// override Path Params value, which was set at client instance level.
+func (r *Request) SetPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetPathParam(p, v)
+ }
+ return r
+}
+
+// ExpectContentType method allows to provide fallback `Content-Type` for automatic unmarshalling
+// when `Content-Type` response header is unavailable.
+func (r *Request) ExpectContentType(contentType string) *Request {
+ r.fallbackContentType = contentType
+ return r
+}
+
+// ForceContentType method provides a strong sense of response `Content-Type` for automatic unmarshalling.
+// Resty gives this a higher priority than the `Content-Type` response header. This means that if both
+// `Request.ForceContentType` is set and the response `Content-Type` is available, `ForceContentType` will win.
+func (r *Request) ForceContentType(contentType string) *Request {
+ r.forceContentType = contentType
+ return r
+}
+
+// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
+//
+// Note: This option only applicable to standard JSON Marshaller.
+func (r *Request) SetJSONEscapeHTML(b bool) *Request {
+ r.jsonEscapeHTML = b
+ return r
+}
+
+// SetCookie method appends a single cookie in the current request instance.
+// client.R().SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+//
+// Note: Method appends the Cookie value into existing Cookie if already existing.
+//
+// Since v2.1.0
+func (r *Request) SetCookie(hc *http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, hc)
+ return r
+}
+
+// SetCookies method sets an array of cookies in the current request instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty's current request
+// client.R().SetCookies(cookies)
+//
+// Note: Method appends the Cookie value into existing Cookie if already existing.
+//
+// Since v2.1.0
+func (r *Request) SetCookies(rs []*http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, rs...)
+ return r
+}
+
+// AddRetryCondition method adds a retry condition function to the request's
+// array of functions that are checked to determine if the request is retried.
+// The request will retry if any of the functions return true and error is nil.
+//
+// Note: These retry conditions are checked before all retry conditions of the client.
+//
+// Since v2.7.0
+func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request {
+ r.retryConditions = append(r.retryConditions, condition)
+ return r
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP request tracing
+//_______________________________________________________________________
+
+// EnableTrace method enables trace for the current request
+// using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New()
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// See `Client.EnableTrace` available too to get trace info for all requests.
+//
+// Since v2.0.0
+func (r *Request) EnableTrace() *Request {
+ r.trace = true
+ return r
+}
+
+// TraceInfo method returns the trace info for the request.
+// If either the Client or Request EnableTrace function has not been called
+// prior to the request being made, an empty TraceInfo object will be returned.
+//
+// Since v2.0.0
+func (r *Request) TraceInfo() TraceInfo {
+ ct := r.clientTrace
+
+ if ct == nil {
+ return TraceInfo{}
+ }
+
+ ti := TraceInfo{
+ DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
+ TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
+ ServerTime: ct.gotFirstResponseByte.Sub(ct.gotConn),
+ IsConnReused: ct.gotConnInfo.Reused,
+ IsConnWasIdle: ct.gotConnInfo.WasIdle,
+ ConnIdleTime: ct.gotConnInfo.IdleTime,
+ RequestAttempt: r.Attempt,
+ }
+
+ // Calculate the total time accordingly,
+ // when connection is reused
+ if ct.gotConnInfo.Reused {
+ ti.TotalTime = ct.endTime.Sub(ct.getConn)
+ } else {
+ ti.TotalTime = ct.endTime.Sub(ct.dnsStart)
+ }
+
+ // Only calculate on successful connections
+ if !ct.connectDone.IsZero() {
+ ti.TCPConnTime = ct.connectDone.Sub(ct.dnsDone)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotConn.IsZero() {
+ ti.ConnTime = ct.gotConn.Sub(ct.getConn)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotFirstResponseByte.IsZero() {
+ ti.ResponseTime = ct.endTime.Sub(ct.gotFirstResponseByte)
+ }
+
+ // Capture remote address info when connection is non-nil
+ if ct.gotConnInfo.Conn != nil {
+ ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr()
+ }
+
+ return ti
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP verb method starts here
+//_______________________________________________________________________
+
+// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
+func (r *Request) Get(url string) (*Response, error) {
+ return r.Execute(MethodGet, url)
+}
+
+// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
+func (r *Request) Head(url string) (*Response, error) {
+ return r.Execute(MethodHead, url)
+}
+
+// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
+func (r *Request) Post(url string) (*Response, error) {
+ return r.Execute(MethodPost, url)
+}
+
+// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
+func (r *Request) Put(url string) (*Response, error) {
+ return r.Execute(MethodPut, url)
+}
+
+// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
+func (r *Request) Delete(url string) (*Response, error) {
+ return r.Execute(MethodDelete, url)
+}
+
+// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
+func (r *Request) Options(url string) (*Response, error) {
+ return r.Execute(MethodOptions, url)
+}
+
+// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
+func (r *Request) Patch(url string) (*Response, error) {
+ return r.Execute(MethodPatch, url)
+}
+
+// Send method performs the HTTP request using the method and URL already defined
+// for current `Request`.
+// req := client.R()
+// req.Method = resty.GET
+// req.URL = "http://httpbin.org/get"
+// resp, err := client.R().Send()
+func (r *Request) Send() (*Response, error) {
+ return r.Execute(r.Method, r.URL)
+}
+
+// Execute method performs the HTTP request with given HTTP method and URL
+// for current `Request`.
+// resp, err := client.R().Execute(resty.GET, "http://httpbin.org/get")
+func (r *Request) Execute(method, url string) (*Response, error) {
+ var addrs []*net.SRV
+ var resp *Response
+ var err error
+
+ if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
+ // No OnError hook here since this is a request validation error
+ return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
+ }
+
+ if r.SRV != nil {
+ _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
+ if err != nil {
+ r.client.onErrorHooks(r, nil, err)
+ return nil, err
+ }
+ }
+
+ r.Method = method
+ r.URL = r.selectAddr(addrs, url, 0)
+
+ if r.client.RetryCount == 0 {
+ r.Attempt = 1
+ resp, err = r.client.execute(r)
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+ return resp, unwrapNoRetryErr(err)
+ }
+
+ err = Backoff(
+ func() (*Response, error) {
+ r.Attempt++
+
+ r.URL = r.selectAddr(addrs, url, r.Attempt)
+
+ resp, err = r.client.execute(r)
+ if err != nil {
+ r.client.log.Errorf("%v, Attempt %v", err, r.Attempt)
+ }
+
+ return resp, err
+ },
+ Retries(r.client.RetryCount),
+ WaitTime(r.client.RetryWaitTime),
+ MaxWaitTime(r.client.RetryMaxWaitTime),
+ RetryConditions(append(r.retryConditions, r.client.RetryConditions...)),
+ RetryHooks(r.client.RetryHooks),
+ )
+
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+
+ return resp, unwrapNoRetryErr(err)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// SRVRecord struct
+//_______________________________________________________________________
+
+// SRVRecord struct holds the data to query the SRV record for the
+// following service.
+type SRVRecord struct {
+ Service string
+ Domain string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Unexported methods
+//_______________________________________________________________________
+
+func (r *Request) fmtBodyString(sl int64) (body string) {
+ body = "***** NO CONTENT *****"
+ if !isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
+ return
+ }
+
+ if _, ok := r.Body.(io.Reader); ok {
+ body = "***** BODY IS io.Reader *****"
+ return
+ }
+
+ // multipart or form-data
+ if r.isMultiPart || r.isFormData {
+ bodySize := int64(r.bodyBuf.Len())
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ return
+ }
+ body = r.bodyBuf.String()
+ return
+ }
+
+ // request body data
+ if r.Body == nil {
+ return
+ }
+ var prtBodyBytes []byte
+ var err error
+
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ if canJSONMarshal(contentType, kind) {
+ prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ")
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
+ } else if b, ok := r.Body.(string); ok {
+ if IsJSONType(contentType) {
+ bodyBytes := []byte(b)
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ if err = json.Indent(out, bodyBytes, "", " "); err == nil {
+ prtBodyBytes = out.Bytes()
+ }
+ } else {
+ body = b
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ body = fmt.Sprintf("***** BODY IS byte(s) (size - %d) *****", len(b))
+ return
+ }
+
+ if prtBodyBytes != nil && err == nil {
+ body = string(prtBodyBytes)
+ }
+
+ if len(body) > 0 {
+ bodySize := int64(len([]byte(body)))
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ }
+ }
+
+ return
+}
+
+func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
+ if addrs == nil {
+ return path
+ }
+
+ idx := attempt % len(addrs)
+ domain := strings.TrimRight(addrs[idx].Target, ".")
+ path = strings.TrimLeft(path, "/")
+
+ return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
+}
+
+func (r *Request) initValuesMap() {
+ if r.values == nil {
+ r.values = make(map[string]interface{})
+ }
+}
+
+var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
+ buf := acquireBuffer()
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ if err := encoder.Encode(v); err != nil {
+ releaseBuffer(buf)
+ return nil, err
+ }
+
+ return buf, nil
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/response.go b/test/performance/vendor/github.com/go-resty/resty/v2/response.go
new file mode 100644
index 000000000..8ae0e10ba
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/response.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response struct and methods
+//_______________________________________________________________________
+
+// Response struct holds response values of executed request.
+type Response struct {
+ Request *Request
+ RawResponse *http.Response
+
+ body []byte
+ size int64
+ receivedAt time.Time
+}
+
+// Body method returns HTTP response as []byte array for the executed request.
+//
+// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+ if r.RawResponse == nil {
+ return []byte{}
+ }
+ return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+// Example: 200 OK
+func (r *Response) Status() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+// Example: 200
+func (r *Response) StatusCode() int {
+ if r.RawResponse == nil {
+ return 0
+ }
+ return r.RawResponse.StatusCode
+}
+
+// Proto method returns the HTTP response protocol used for the request.
+func (r *Response) Proto() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Proto
+}
+
+// Result method returns the response value as an object if it has one
+func (r *Response) Result() interface{} {
+ return r.Request.Result
+}
+
+// Error method returns the error object if it has one
+func (r *Response) Error() interface{} {
+ return r.Request.Error
+}
+
+// Header method returns the response headers
+func (r *Response) Header() http.Header {
+ if r.RawResponse == nil {
+ return http.Header{}
+ }
+ return r.RawResponse.Header
+}
+
+// Cookies method to access all the response cookies
+func (r *Response) Cookies() []*http.Cookie {
+ if r.RawResponse == nil {
+ return make([]*http.Cookie, 0)
+ }
+ return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as String.
+func (r *Response) String() string {
+ if r.body == nil {
+ return ""
+ }
+ return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the time of HTTP response time that from request we sent and received a request.
+//
+// See `Response.ReceivedAt` to know when client received response and see `Response.Request.Time` to know
+// when client sent a request.
+func (r *Response) Time() time.Duration {
+ if r.Request.clientTrace != nil {
+ return r.Request.TraceInfo().TotalTime
+ }
+ return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns when response got received from server for the request.
+func (r *Response) ReceivedAt() time.Time {
+ return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. Ya, you can relay on HTTP `Content-Length` header,
+// however it won't be good for chucked transfer/compressed response. Since Resty calculates response size
+// at the client end. You will get actual size of the http response.
+func (r *Response) Size() int64 {
+ return r.size
+}
+
+// RawBody method exposes the HTTP raw response body. Use this method in-conjunction with `SetDoNotParseResponse`
+// option otherwise you get an error as `read err: http: read on closed response body`.
+//
+// Do not forget to close the body, otherwise you might get into connection leaks, no connection reuse.
+// Basically you have taken over the control of response parsing from `Resty`.
+func (r *Response) RawBody() io.ReadCloser {
+ if r.RawResponse == nil {
+ return nil
+ }
+ return r.RawResponse.Body
+}
+
+// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
+func (r *Response) IsSuccess() bool {
+ return r.StatusCode() > 199 && r.StatusCode() < 300
+}
+
+// IsError method returns true if HTTP status `code >= 400` otherwise false.
+func (r *Response) IsError() bool {
+ return r.StatusCode() > 399
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Unexported methods
+//_______________________________________________________________________
+
+func (r *Response) setReceivedAt() {
+ r.receivedAt = time.Now()
+ if r.Request.clientTrace != nil {
+ r.Request.clientTrace.endTime = r.receivedAt
+ }
+}
+
+func (r *Response) fmtBodyString(sl int64) string {
+ if r.body != nil {
+ if int64(len(r.body)) > sl {
+ return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
+ }
+ ct := r.Header().Get(hdrContentTypeKey)
+ if IsJSONType(ct) {
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ err := json.Indent(out, r.body, "", " ")
+ if err != nil {
+ return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
+ }
+ return out.String()
+ }
+ return r.String()
+ }
+
+ return "***** NO CONTENT *****"
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/resty.go b/test/performance/vendor/github.com/go-resty/resty/v2/resty.go
new file mode 100644
index 000000000..6f9c8b4cd
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/resty.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides Simple HTTP and REST client library for Go.
+package resty
+
+import (
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// Version # of resty
+const Version = "2.7.0"
+
+// New method creates a new Resty client.
+func New() *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ })
+}
+
+// NewWithClient method creates a new Resty client with given `http.Client`.
+func NewWithClient(hc *http.Client) *Client {
+ return createClient(hc)
+}
+
+// NewWithLocalAddr method creates a new Resty client with given Local Address
+// to dial from.
+func NewWithLocalAddr(localAddr net.Addr) *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ Transport: createTransport(localAddr),
+ })
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/retry.go b/test/performance/vendor/github.com/go-resty/resty/v2/retry.go
new file mode 100644
index 000000000..00b8514a5
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/retry.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+)
+
// Defaults applied by Backoff when the caller supplies no overriding Option.
const (
	// defaultMaxRetries is the number of retries after the initial attempt.
	defaultMaxRetries = 3
	// defaultWaitTime is the base sleep between attempts.
	defaultWaitTime = time.Duration(100) * time.Millisecond
	// defaultMaxWaitTime caps the exponentially growing sleep.
	defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
)
+
type (
	// Option is to create convenient retry options like wait time, max retries, etc.
	Option func(*Options)

	// RetryConditionFunc type is for retry condition function
	// input: non-nil Response OR request execution error
	RetryConditionFunc func(*Response, error) bool

	// OnRetryFunc is for side-effecting functions triggered on retry
	OnRetryFunc func(*Response, error)

	// RetryAfterFunc returns time to wait before retry
	// For example, it can parse HTTP Retry-After header
	// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
	// Non-nil error is returned if it is found that request is not retryable
	// (0, nil) is a special result means 'use default algorithm'
	RetryAfterFunc func(*Client, *Response) (time.Duration, error)

	// Options struct is used to hold retry settings.
	Options struct {
		maxRetries      int                  // retries after the first attempt
		waitTime        time.Duration        // base wait between attempts
		maxWaitTime     time.Duration        // upper bound for backoff wait
		retryConditions []RetryConditionFunc // evaluated in order; see Backoff
		retryHooks      []OnRetryFunc        // run before each retry
	}
)
+
// Retries sets the max number of retries (attempts after the first try).
func Retries(value int) Option {
	return func(o *Options) {
		o.maxRetries = value
	}
}

// WaitTime sets the default wait time to sleep between requests.
func WaitTime(value time.Duration) Option {
	return func(o *Options) {
		o.waitTime = value
	}
}

// MaxWaitTime sets the max wait time to sleep between requests.
func MaxWaitTime(value time.Duration) Option {
	return func(o *Options) {
		o.maxWaitTime = value
	}
}

// RetryConditions sets the conditions that will be checked for retry.
// Note: this replaces (does not append to) any previously-set conditions.
func RetryConditions(conditions []RetryConditionFunc) Option {
	return func(o *Options) {
		o.retryConditions = conditions
	}
}

// RetryHooks sets the hooks that will be executed after each retry.
func RetryHooks(hooks []OnRetryFunc) Option {
	return func(o *Options) {
		o.retryHooks = hooks
	}
}
+
// Backoff retries with increasing timeout duration up until X amount of retries
// (Default is 3 attempts, Override with option Retries(n)).
//
// The operation is invoked up to maxRetries+1 times. After each attempt the
// retry conditions decide whether to try again; between attempts Backoff
// sleeps for a jittered, exponentially growing duration (or a custom
// RetryAfter-derived one), aborting early if the request context is done.
func Backoff(operation func() (*Response, error), options ...Option) error {
	// Defaults
	opts := Options{
		maxRetries:      defaultMaxRetries,
		waitTime:        defaultWaitTime,
		maxWaitTime:     defaultMaxWaitTime,
		retryConditions: []RetryConditionFunc{},
	}

	for _, o := range options {
		o(&opts)
	}

	var (
		resp *Response
		err  error
	)

	for attempt := 0; attempt <= opts.maxRetries; attempt++ {
		resp, err = operation()
		// Prefer the request's own context (if any) so cancellation is honored.
		ctx := context.Background()
		if resp != nil && resp.Request.ctx != nil {
			ctx = resp.Request.ctx
		}
		if ctx.Err() != nil {
			return err
		}

		err1 := unwrapNoRetryErr(err)           // raw error, it used for return users callback.
		needsRetry := err != nil && err == err1 // retry on a few operation errors by default

		// Each condition overrides the default decision; the first condition
		// that returns true short-circuits the loop.
		for _, condition := range opts.retryConditions {
			needsRetry = condition(resp, err1)
			if needsRetry {
				break
			}
		}

		if !needsRetry {
			return err
		}

		for _, hook := range opts.retryHooks {
			hook(resp, err)
		}

		// Don't need to wait when no retries left.
		// Still run retry hooks even on last retry to keep compatibility.
		if attempt == opts.maxRetries {
			return err
		}

		waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
		if err2 != nil {
			// A RetryAfterFunc error means "not retryable"; surface it only
			// when the operation itself did not already fail.
			if err == nil {
				err = err2
			}
			return err
		}

		select {
		case <-time.After(waitTime):
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	return err
}
+
// sleepDuration computes how long to wait before the next retry attempt.
// It defers to the client's RetryAfter callback when one is set (clamping
// the result to [min, max]); otherwise it falls back to jittered
// exponential backoff. A non-nil error means the request is not retryable.
func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
	const maxInt = 1<<31 - 1 // max int for arch 386
	if max < 0 {
		max = maxInt
	}
	if resp == nil {
		return jitterBackoff(min, max, attempt), nil
	}

	retryAfterFunc := resp.Request.client.RetryAfter

	// Check for custom callback
	if retryAfterFunc == nil {
		return jitterBackoff(min, max, attempt), nil
	}

	result, err := retryAfterFunc(resp.Request.client, resp)
	if err != nil {
		return 0, err // i.e. 'API quota exceeded'
	}
	if result == 0 {
		// (0, nil) is the documented "use default algorithm" sentinel.
		return jitterBackoff(min, max, attempt), nil
	}
	// Clamp the callback's answer into the [min, max] window.
	if result < 0 || max < result {
		result = max
	}
	if result < min {
		result = min
	}
	return result, nil
}
+
+// Return capped exponential backoff with jitter
+// http://www.awsarchitectureblog.com/2015/03/backoff.html
+func jitterBackoff(min, max time.Duration, attempt int) time.Duration {
+ base := float64(min)
+ capLevel := float64(max)
+
+ temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
+ ri := time.Duration(temp / 2)
+ result := randDuration(ri)
+
+ if result < min {
+ result = min
+ }
+
+ return result
+}
+
+var rnd = newRnd()
+var rndMu sync.Mutex
+
+func randDuration(center time.Duration) time.Duration {
+ rndMu.Lock()
+ defer rndMu.Unlock()
+
+ var ri = int64(center)
+ var jitter = rnd.Int63n(ri)
+ return time.Duration(math.Abs(float64(ri + jitter)))
+}
+
+func newRnd() *rand.Rand {
+ var seed = time.Now().UnixNano()
+ var src = rand.NewSource(seed)
+ return rand.New(src)
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/trace.go b/test/performance/vendor/github.com/go-resty/resty/v2/trace.go
new file mode 100644
index 000000000..23cf70335
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/trace.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http/httptrace"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// TraceInfo struct
+//_______________________________________________________________________
+
// TraceInfo struct is used provide request trace info such as DNS lookup
// duration, Connection obtain duration, Server processing duration, etc.
//
// Since v2.0.0
type TraceInfo struct {
	// DNSLookup is a duration that transport took to perform
	// DNS lookup.
	DNSLookup time.Duration

	// ConnTime is a duration that took to obtain a successful connection.
	ConnTime time.Duration

	// TCPConnTime is a duration that took to obtain the TCP connection.
	TCPConnTime time.Duration

	// TLSHandshake is a duration that TLS handshake took place.
	TLSHandshake time.Duration

	// ServerTime is a duration that server took to respond first byte.
	ServerTime time.Duration

	// ResponseTime is a duration since first response byte from server to
	// request completion.
	ResponseTime time.Duration

	// TotalTime is a duration that total request took end-to-end.
	TotalTime time.Duration

	// IsConnReused is whether this connection has been previously
	// used for another HTTP request.
	IsConnReused bool

	// IsConnWasIdle is whether this connection was obtained from an
	// idle pool.
	IsConnWasIdle bool

	// ConnIdleTime is a duration how long the connection was previously
	// idle, if IsConnWasIdle is true.
	ConnIdleTime time.Duration

	// RequestAttempt is to represent the request attempt made during a Resty
	// request execution flow, including retry count.
	RequestAttempt int

	// RemoteAddr returns the remote network address.
	RemoteAddr net.Addr
}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// ClientTrace struct and its methods
+//_______________________________________________________________________
+
+// tracer struct maps the `httptrace.ClientTrace` hooks into Fields
+// with same naming for easy understanding. Plus additional insights
+// Request.
+type clientTrace struct {
+ getConn time.Time
+ dnsStart time.Time
+ dnsDone time.Time
+ connectDone time.Time
+ tlsHandshakeStart time.Time
+ tlsHandshakeDone time.Time
+ gotConn time.Time
+ gotFirstResponseByte time.Time
+ endTime time.Time
+ gotConnInfo httptrace.GotConnInfo
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Trace unexported methods
+//_______________________________________________________________________
+
+func (t *clientTrace) createContext(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(
+ ctx,
+ &httptrace.ClientTrace{
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ t.dnsStart = time.Now()
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ t.dnsDone = time.Now()
+ },
+ ConnectStart: func(_, _ string) {
+ if t.dnsDone.IsZero() {
+ t.dnsDone = time.Now()
+ }
+ if t.dnsStart.IsZero() {
+ t.dnsStart = t.dnsDone
+ }
+ },
+ ConnectDone: func(net, addr string, err error) {
+ t.connectDone = time.Now()
+ },
+ GetConn: func(_ string) {
+ t.getConn = time.Now()
+ },
+ GotConn: func(ci httptrace.GotConnInfo) {
+ t.gotConn = time.Now()
+ t.gotConnInfo = ci
+ },
+ GotFirstResponseByte: func() {
+ t.gotFirstResponseByte = time.Now()
+ },
+ TLSHandshakeStart: func() {
+ t.tlsHandshakeStart = time.Now()
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+ t.tlsHandshakeDone = time.Now()
+ },
+ },
+ )
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/transport.go b/test/performance/vendor/github.com/go-resty/resty/v2/transport.go
new file mode 100644
index 000000000..e15b48c55
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/transport.go
@@ -0,0 +1,35 @@
+// +build go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/transport112.go b/test/performance/vendor/github.com/go-resty/resty/v2/transport112.go
new file mode 100644
index 000000000..fbbbc5911
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/transport112.go
@@ -0,0 +1,34 @@
+// +build !go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/test/performance/vendor/github.com/go-resty/resty/v2/util.go b/test/performance/vendor/github.com/go-resty/resty/v2/util.go
new file mode 100644
index 000000000..1d563befd
--- /dev/null
+++ b/test/performance/vendor/github.com/go-resty/resty/v2/util.go
@@ -0,0 +1,391 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Logger interface
+//_______________________________________________________________________
+
+// Logger interface is to abstract the logging from Resty. Gives control to
+// the Resty users, choice of the logger.
+type Logger interface {
+ Errorf(format string, v ...interface{})
+ Warnf(format string, v ...interface{})
+ Debugf(format string, v ...interface{})
+}
+
+func createLogger() *logger {
+ l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
+ return l
+}
+
+var _ Logger = (*logger)(nil)
+
+type logger struct {
+ l *log.Logger
+}
+
+func (l *logger) Errorf(format string, v ...interface{}) {
+ l.output("ERROR RESTY "+format, v...)
+}
+
+func (l *logger) Warnf(format string, v ...interface{}) {
+ l.output("WARN RESTY "+format, v...)
+}
+
+func (l *logger) Debugf(format string, v ...interface{}) {
+ l.output("DEBUG RESTY "+format, v...)
+}
+
+func (l *logger) output(format string, v ...interface{}) {
+ if len(v) == 0 {
+ l.l.Print(format)
+ return
+ }
+ l.l.Printf(format, v...)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Helper methods
+//_______________________________________________________________________
+
// IsStringEmpty method tells whether given string is empty or not
// (whitespace-only strings count as empty).
func IsStringEmpty(str string) bool {
	return strings.TrimSpace(str) == ""
}
+
+// DetectContentType method is used to figure out `Request.Body` content type for request header
+func DetectContentType(body interface{}) string {
+ contentType := plainTextType
+ kind := kindOf(body)
+ switch kind {
+ case reflect.Struct, reflect.Map:
+ contentType = jsonContentType
+ case reflect.String:
+ contentType = plainTextType
+ default:
+ if b, ok := body.([]byte); ok {
+ contentType = http.DetectContentType(b)
+ } else if kind == reflect.Slice {
+ contentType = jsonContentType
+ }
+ }
+
+ return contentType
+}
+
// IsJSONType method is to check JSON content type or not
// (matched against the package-level jsonCheck pattern).
func IsJSONType(ct string) bool {
	return jsonCheck.MatchString(ct)
}

// IsXMLType method is to check XML content type or not
// (matched against the package-level xmlCheck pattern).
func IsXMLType(ct string) bool {
	return xmlCheck.MatchString(ct)
}
+
+// Unmarshalc content into object from JSON or XML
+func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
+ if IsJSONType(ct) {
+ err = c.JSONUnmarshal(b, d)
+ } else if IsXMLType(ct) {
+ err = c.XMLUnmarshal(b, d)
+ }
+
+ return
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// RequestLog and ResponseLog type
+//_______________________________________________________________________
+
// RequestLog struct is used to collected information from resty request
// instance for debug logging. It is sent to the request log callback before
// resty actually logs the information.
type RequestLog struct {
	Header http.Header // copy of the outgoing request headers
	Body   string      // formatted request body for logging
}

// ResponseLog struct is used to collected information from resty response
// instance for debug logging. It is sent to the response log callback before
// resty actually logs the information.
type ResponseLog struct {
	Header http.Header // copy of the received response headers
	Body   string      // formatted response body for logging
}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
// jsonMarshal serializes d honoring the HTML-escape opt-out: if either the
// request or the client disabled escaping, the no-escape encoder is used;
// otherwise the client's configured JSONMarshal runs and the result is
// copied into a pooled buffer (caller releases it).
func jsonMarshal(c *Client, r *Request, d interface{}) (*bytes.Buffer, error) {
	if !r.jsonEscapeHTML || !c.jsonEscapeHTML {
		return noescapeJSONMarshal(d)
	}

	data, err := c.JSONMarshal(d)
	if err != nil {
		return nil, err
	}

	buf := acquireBuffer()
	_, _ = buf.Write(data) // bytes.Buffer.Write never returns an error
	return buf, nil
}
+
+func firstNonEmpty(v ...string) string {
+ for _, s := range v {
+ if !IsStringEmpty(s) {
+ return s
+ }
+ }
+ return ""
+}
+
// quoteEscaper backslash-escapes `\` and `"` so values can be embedded in
// quoted strings (e.g. multipart Content-Disposition parameters).
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes returns s with backslashes and double quotes escaped.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}
+
+func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
+ hdr := make(textproto.MIMEHeader)
+
+ var contentDispositionValue string
+ if IsStringEmpty(fileName) {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
+ } else {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ param, escapeQuotes(fileName))
+ }
+ hdr.Set("Content-Disposition", contentDispositionValue)
+
+ if !IsStringEmpty(contentType) {
+ hdr.Set(hdrContentTypeKey, contentType)
+ }
+ return hdr
+}
+
+func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
+ partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, mf.Reader)
+ return err
+}
+
+func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
+ // Auto detect actual multipart content type
+ cbuf := make([]byte, 512)
+ size, err := r.Read(cbuf)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ if err != nil {
+ return err
+ }
+
+ if _, err = partWriter.Write(cbuf[:size]); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, r)
+ return err
+}
+
// addFile opens the file at path and writes it into w as a multipart part
// named fieldName, using the file's base name as the part filename.
// The file handle is closed on return (errors from Close are ignored).
func addFile(w *multipart.Writer, fieldName, path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer closeq(file)
	return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
}

// addFileReader writes the user-supplied File (name + reader) into w as a
// multipart part.
func addFileReader(w *multipart.Writer, f *File) error {
	return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
}
+
+func getPointer(v interface{}) interface{} {
+ vv := valueOf(v)
+ if vv.Kind() == reflect.Ptr {
+ return v
+ }
+ return reflect.New(vv.Type()).Interface()
+}
+
+func isPayloadSupported(m string, allowMethodGet bool) bool {
+ return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
+}
+
+func typeOf(i interface{}) reflect.Type {
+ return indirect(valueOf(i)).Type()
+}
+
+func valueOf(i interface{}) reflect.Value {
+ return reflect.ValueOf(i)
+}
+
+func indirect(v reflect.Value) reflect.Value {
+ return reflect.Indirect(v)
+}
+
+func kindOf(v interface{}) reflect.Kind {
+ return typeOf(v).Kind()
+}
+
+func createDirectory(dir string) (err error) {
+ if _, err = os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
// canJSONMarshal reports whether a value of the given kind should be JSON
// encoded for the given content type (structs, maps and slices only).
func canJSONMarshal(contentType string, kind reflect.Kind) bool {
	return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
}

// functionName returns the fully-qualified name of the function value i,
// for use in debug/log output.
func functionName(i interface{}) string {
	return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
}

// acquireBuffer takes a *bytes.Buffer from the package buffer pool.
// Pair every call with releaseBuffer.
func acquireBuffer() *bytes.Buffer {
	return bufPool.Get().(*bytes.Buffer)
}

// releaseBuffer resets buf and returns it to the pool; nil is a no-op.
func releaseBuffer(buf *bytes.Buffer) {
	if buf != nil {
		buf.Reset()
		bufPool.Put(buf)
	}
}
+
// requestBodyReleaser wraps requests's body and implements custom Close for it.
// The Close method closes original body and releases request body back to sync.Pool.
type requestBodyReleaser struct {
	releaseOnce sync.Once // guards against double-release on repeated Close
	reqBuf      *bytes.Buffer
	io.ReadCloser
}

// newRequestBodyReleaser ties the response body's lifetime to the pooled
// request buffer: closing the returned ReadCloser also releases reqBuf.
// With a nil reqBuf there is nothing to release, so respBody is returned
// unwrapped.
func newRequestBodyReleaser(respBody io.ReadCloser, reqBuf *bytes.Buffer) io.ReadCloser {
	if reqBuf == nil {
		return respBody
	}

	return &requestBodyReleaser{
		reqBuf:     reqBuf,
		ReadCloser: respBody,
	}
}

// Close closes the wrapped body and (once) returns the request buffer to
// the pool. The underlying Close error is propagated.
func (rr *requestBodyReleaser) Close() error {
	err := rr.ReadCloser.Close()
	rr.releaseOnce.Do(func() {
		releaseBuffer(rr.reqBuf)
	})

	return err
}
+
+func closeq(v interface{}) {
+ if c, ok := v.(io.Closer); ok {
+ silently(c.Close())
+ }
+}
+
+func silently(_ ...interface{}) {}
+
// composeHeaders renders hdrs as one tab-indented, right-aligned line per
// header (sorted by key) for debug logging. For the Cookie header it also
// appends the cookies the client's jar would send for the request URL.
func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
	str := make([]string, 0, len(hdrs))
	for _, k := range sortHeaderKeys(hdrs) {
		var v string
		if k == "Cookie" {
			cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
			if c.GetClient().Jar != nil {
				// NOTE: the loop variable c shadows the *Client parameter here;
				// inside this loop c is an *http.Cookie.
				for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
					if cv != "" {
						cv = cv + "; " + c.String()
					} else {
						cv = c.String()
					}
				}
			}
			// %25s right-pads the key to a 25-character column.
			v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
		} else {
			v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
		}
		if v != "" {
			str = append(str, "\t"+v)
		}
	}
	return strings.Join(str, "\n")
}
+
+func sortHeaderKeys(hdrs http.Header) []string {
+ keys := make([]string, 0, len(hdrs))
+ for key := range hdrs {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func copyHeaders(hdrs http.Header) http.Header {
+ nh := http.Header{}
+ for k, v := range hdrs {
+ nh[k] = v
+ }
+ return nh
+}
+
+type noRetryErr struct {
+ err error
+}
+
+func (e *noRetryErr) Error() string {
+ return e.err.Error()
+}
+
+func wrapNoRetryErr(err error) error {
+ if err != nil {
+ err = &noRetryErr{err: err}
+ }
+ return err
+}
+
+func unwrapNoRetryErr(err error) error {
+ if e, ok := err.(*noRetryErr); ok {
+ err = e.err
+ }
+ return err
+}
diff --git a/test/performance/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go b/test/performance/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
index 2df52620d..e054b5c00 100644
--- a/test/performance/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
+++ b/test/performance/vendor/github.com/nginx/agent/sdk/v2/proto/events/event.pb.go
@@ -133,6 +133,7 @@ type Event struct {
// Event metadata
Metadata *Metadata `protobuf:"bytes,1,opt,name=Metadata,proto3" json:"metadata"`
// Types that are valid to be assigned to Data:
+ //
// *Event_ActivityEvent
// *Event_SecurityViolationEvent
Data isEvent_Data `protobuf_oneof:"data"`
diff --git a/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go b/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
new file mode 100644
index 000000000..b89bafc92
--- /dev/null
+++ b/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
@@ -0,0 +1,33 @@
+package utils
+
+import (
+ "github.com/go-resty/resty/v2"
+ "strings"
+)
+
+func ProcessApiMetricResponse(resp *resty.Response) []string {
+ metrics := strings.Split(resp.String(), "\n")
+
+ i := 0
+
+ for _, metric := range metrics {
+ if metric[0:1] != "#" {
+ metrics[i] = metric
+ i++
+ }
+ }
+
+ metrics = metrics[:i]
+
+ return metrics
+}
+
+func ProcessApiNginxInstanceResponse(resp *resty.Response) []string {
+ details := strings.ReplaceAll(resp.String(), "\"", "")
+ details = strings.ReplaceAll(details, "\\", "")
+
+ detail := strings.Split(details, ",")
+
+ return detail
+
+}
diff --git a/test/performance/vendor/golang.org/x/net/publicsuffix/data/children b/test/performance/vendor/golang.org/x/net/publicsuffix/data/children
new file mode 100644
index 0000000000000000000000000000000000000000..1038c561ade4683e91b37e8298fc5ead7c306779
GIT binary patch
literal 2876
zcmWO8`CAib0svrUzRb+`nSk6zK|{VtNTRiJd7yyG8JU16mRliLD^_b=t;%||>na8l
zP!yF%fdB@t+Ui#Avu#}!)CeZrS5%ZMplz*Iv9&5~*B{>h;dOCwadCeq;GIS9q`Z^&
z4zVS!huE^`0kP%QL!#hTKe0dV5pkd}k|?!C6Nl&oqRgryj?$?_d0`G=rZt2)emhZX
z-9en7jfADpL|Ci`i8}faai*}0IAc9YoTX2R&&HotpS7M5e;NNJao+kBaiQ><_=2^8
zxJch1F2>u5ONGtEC2I%qt+kW*&U&Bt!TN}}690_2YJE;zx4sqEGeBIQz$5DSQiP46
z346kOMDyNYqMeyTbTF|*XM&RGx}8MyGpQt*u$=@DVW1RXU~nZTDBVa8s31KJv7}dH
zBIym6lHSS`(z|gP>8ng7eGQqUP?<$eHK@s{jhpc_xP=T*Zp8tHe~|%=yGSwoHz`r>
z)<_JcSPBnfsj`ez7!GR`jVH+&@Dv%`cn*ia+c-qoh(iobI27K&p-MXrH8hiA?+&y|}@M@D2V1e1j9<8#Y&blbeWd+C1_t-LV
zFPDvbjVnn9e-(BZ^RUCF!FO$1e2@DG-!tapd$u+BKKC)cZ(N7__@9t{+^1xpZ3BK_
z+^BiTZN?961>`V)8y?}C@Ca9iM~sK@DE|l^HJ0O1+cErze;hwDR^UgrD*Tvl#*evb
z^0Bc7|IF3mpN(JPpKV{`C;ao|6Yc_jV*C$&V*3XF!oP@r;V$8){LA<$_h0g<@jLPv
z|9kS8?F#uTca40(uP4WhjpT3q7V>v~7x}x*LB6)#C*N?7@EhZgCI~j&0$~Cx8>VVw!|d%~wxAQtHFbe-
z!9%dvKG>A@uAl4OuxMFt@*X#=tTqgl#u|G&m!hma509ElUken0(Qe4A9O41^*
zym>KL(aeFg;#82@KJFwSYKQPHo9H~8wqhMdMH`rIA02L+E*-E#6u$9T1*vgX6*vgj8Y?a#<
zwkmlmTUAoPR<-;SnBBGkbMki9T(X0$F4@V}xb0$VN_Mj~Ero1t@?N&Kq=>C;*#|7i
zMsTvE6r3(O#&d6}m3X+vNIX(vB_0RjBpz+?JkPcSo_8C^Qyoait;Ej8=
z@c!=nmEv{&O$k=uMdO=r+-qkyl^6me1$6X8Kq1|gj8iuh`!2qi@qvt
zD`oL5pw9FhpuXujMSXw7MqTasiE8$JOSPqkQ9bF4sRu_hsnJQBsh=j5Q_m-z);~|b
zNsS%7MUC~gP`~%KQhyx1PmT8uQGfQ1QGchuqUqj0X(q#uM#8D|1fhf$2<3r-j3C<0
z5ll}ME}$otN6_w$DB80;hV~LB(q0Y~?JZnNPjaNtLSZgFIU|qubLeURjP$^7w=>?-ckWN6n~%?+Tm9zH?b#7@
zXLcOjdpwDD_^k?bWakAsj;rarej55OKV9Ho*?$E7b^JBslKn>JQb8~-eI!HV02%2|
z$$&qUfeL|)m*d9pDm-MoK2I5)<0Yf}Cd-%{KN(XoRR;a1$zVk!2=9l1)T!@NY+X-;H1`;(b2(Nd->H-+gk
zFOzlkFK4<%sZ4k73Z~oq0n^=|ChN&fXL`(;OizCn(<{oB_2%X<3z;Ev5i^{-j~O->Ll;qr{M`oRE(3&|2mo>-j|YhX
z3QnwM$KsKCQt%ZKoA40!@
zPoRImdMK*?6sq!e!VGaR%uwgSj6pTb5^G_WdNo{GlMmJ6&2qJRH`I#vKz)q~>IaX&
zj|Pvz{2DX-c<>}#J9r+h6JLbu)R*7}@nyJS@Fv`(zAfKW(++pmbjWuOzJZ^M-@-4%
zT<`gC~Y|LJQsx>of=8Da~Pc23Nu}8Vfv&>)<(j8lJ}&;Q65|@XF9N
z_&+=buWDYxtF^D;b^Hci*Sf%ZmVoucJlMc8u;B!R4Z{=Q4VDjYX$7!}^?^-V3AAaY
zuw{5AY#&}A?_d*PhgJzYhL=ExmV=HHYUmi&z`Lv#KG5pmL+xj9h%JCaS_2%>7Qr{{
zVK}aRj7au5;yIy$(s?N;i;seG`Xbsn2|=A7nqm5d_}q!P)U)ktAE
zfu`$Dq8XAiXom9~de3qm&D4E^f+Uwwkn=hUw%kA=Ix7m5G@($Z9fj#y(QHXOn(gdD
zb1c1RuI?V1CwYM8IR{X2h9V^|P^9xEiqgG83nj17LgzSI
zWceE{){`h&N}=cxh+^vaC|2)=V(UCmoZb_~kNBVjsUK2G{ZXPc043_>XsKR-mexh0
zl#wX3LK=h8q$wy(pMlawGEurT8)fQqP?lbgvPbgKO8t7YYUDGd)^9_ax;;oI-G_Ag
Z1B%rnr6^x|0)0GUL2F0Oqfew4{}2BS2s{7)
literal 0
HcmV?d00001
diff --git a/test/performance/vendor/golang.org/x/net/publicsuffix/data/nodes b/test/performance/vendor/golang.org/x/net/publicsuffix/data/nodes
new file mode 100644
index 0000000000000000000000000000000000000000..34751cd5b9d1b14c3a0977e4fa4b04db058b241d
GIT binary patch
literal 48280
zcmaH!2Xs``*2m9GGBc?V0-=T?iYSVxfMUZtbEnr#dJh5?RIDhrr;tup#4a`z6&1yT
zVja>GniRXJ*q+b+?A`CTPk2UM?|qB4_Bv&k|K4Yxa?iaJlqxv8RVnAhWTmPq>`ImF
zN)ke;Jk+67WgWf#E9&VT(xg;@k)o1}309S~=N5-jWrG7s#s9Tf$P%SoyXurOzi3d&
z&dgd$%Mr_KpsoB-+7jo`R#MffZ0^%UpSwZXT8e4&-eoFP(_>e5BdtZL;zFNNZg+>W
zC+5183OeXD9`FJ(+N@M~JE+WfvT_(F&|9z}@18xvrhIXFN((+_ea?XCXxs@vUeMvff|nREqPdT1qMS
z;lD4aQWBrFf_@KT@vrEpXirtCiE&_Y9)w#3Uc`ch4Pd#wUZriGhz(;LF1T)Yt8^n5
z7{%vM+IvZxQogf$REA--!jgj1CHHly{>Euk12<4D8il^1QBB0}Hygy+Np(U8FfK)D
zX&IQkA)m@NhNIMc*;Lt^Z*wVCz06btjp?=U9ow!3Cd$y~DM3|W8<<@^=yOl%MwSWOzy
zs5uBgmG9KiTVyKho`_ZD7^i|D_#S~UP9-FjO<1eC9tHkYR7|AeC`*53Eyzw@h}a
zA+GCc)sU98Xm+jbP(uv`a9=A53TKL;&ydL5n=HhJ(xFLeXver#HOweP!njxrT}%?j
ze@UnG63oV>0cGQ@ZG>dF@I6n-
zcm=!0D{kU)l!MZb@Hvwh0OvR_&z52%2?QHz64m3k%a&$dNJtuS#Y*&*IW~g}?Hfpwj
zCH9m}s>pixg3r4bmhk3eHFj6Ho`AXOaHqRrE)jjBlE8Njs@y?H{ALWfZZH{??_eo@
zqE#K3nBYtNdSSb6tW&`AhIs79|UzS~V9y
zRdfC75F_AHha?U`!e5D@!tDSr{?J3#UE0W^HJ@tv%oKH~aY(zG;iAND=Gy2boDTul^^#8=zxgJdDSVr%?~`VAy6cy2oH>cA=>$+w
z0+8lefHW_{xOvT1$UQOu>xJcWa>H$<3G|v
z&h+%JRcE%uq~nqX$enHKY}eCNi_dANn$b!~uB2p~0U-Yz07YB9jODWs;}ZO>sc_@l
zg>~v2!vIV8&3eNAJ-y*K(H9=jNMhcbg2ZC@%AO(yl|43A$D=4kciYLFH}TCs!L82k
zn&4FzxK^6naywF5;zTY8)m{xamVLYE+qpgkDdti3eyVN}6Gf3jG
zT9Vyycxh;umR|={|c^IwLrYwS#iX+1JoEIVzMeC3?
z4mZ_BovUiq#pbu|bc9;fd?Tj?%{L)de6mf=-;A|OTDLAzmpXH;>N4XPdV^QtrLzt#
z!HMWAm0T;`&hQj367mmvW9i+>nm7>V_>yOrToQ)vB)Wmv*Qt5~(ga&h1kF
zFzj^r9_>+A#>!jORb{!2czLFu%(kJvygy@JFdpBWx#U4!7Qrr&kts_~Rad<^0pJlG
zgZt`mbzrhu;9{he>&{M$cOx766lQm318q+d7te0mJg;=B1)GaNP%x_=eZ!^WYxulJ
zBI})kQtvSiRU67=O1eK!rg~N?mALX|O!s1^L&MGVfwkZ%~qeaHE*TSTdR9Zvh^
zn5r%@A2H87VR@BQ@gJBZ1oi%Z0kZf8KOM5(2cE;4^KO&jIRf=X(*Yhl)sF1KI;>p?
zNY793MP|U~`N@Xt*pYp*y9Q7)E(f!BT)nDy52#i3Z3F65gOP5f
zBONWqKx~9h0fuu4HhfRFsD{K?dfij%Rb$V>TGeC>K|$H^053ZpYi2)eRBgAbro3uVe?951p^l!%fHT1dybcCk3k?`Sy_i_f$
zcQH094l~u##1vfclSs-~)uNV#c7Uq#8Q~j@AK~Hfx%V!jG?KW4CD>tqH>9!wa;}j2
zF14)nuw=E|dmf6H6fxNzo;dbPqCw5k<}<@i=`J@`uQ
z^T7NJV#UKyP*n(U=ZT`~HB((}>k?M9c+h{DzRD&P1t2>N-lq%+c*?87eYQ|{4x+9U}A_eYA
zChW(d%9GiFA5Tj4h9tGdC~m<)c@Bv9Y>>((a_wG$ab_!%{e1LA&aZ=9RHxP?R!}Wi
zfoyRA1{PcY6<`!+RLx8(Gwb@ni73>_g)0`;1cQ>B%aQu)|`y(@}r^au8gz
z0?b=m)#mMFyR|i1t6Gc$T#U^3C@or!4RfxEjWKqRx*)O39mIZKn`+tIU{$ToVK5hY
zz#O}hj`BfX)!Na9jj|g+5P6|awHYO~s_oUc&=PsWP3~4NQti=>TGier0V`hEt~#PU
zF4bvxQB^VpEoDPxl{%=MF!>k&qtmWBTb@CmnMH=>kH#Z!Im9?W2^X>&X&Z`|zd~$S
zI#gHU?poDt?y!P6mo{%j1JEvQqH5xT=SQj)vJ-n6D8*TeyAyk0E?x;f@B5lWh)^Xd
zP-tT!y01;f>2F~!lEt!cS3SLwlde;2swX@iiEzxLdRh;lBd^A*ws_>R!2LDGi|(kS
zR0WH>$^$vYiwzH@{OjFnOY8Bi`0+~zs=jE)nS>3})5ZW~AQsA4s@9EqIqI1!}Cgt
zdNFahRqZsc#E|=pI<@P?ATCtMEQ^1(2rUseyp%N-wjlq^(kt1_v|KRsTMW`}ZZmx5`wnIm1@<+74M{BCptpp@?6{bGXsvrT>Kx)@or
z)Ks4)4xr?E9c%tCP4!tK$;wjj2__33N1|%Ahm^k)%+9Nbf%Enb^||qCt@=DM*o8^i
zor9}a5|`!ln#Y>t+FD)mC~M0NE$VmUKJ-;S06ybBY?Q1>QonCL8(H(@B#ScMYN2BQ
zy;XORNa1OX*!Y#$*YxWU!W1vxiyzLH>n#?evKB07wOcHSwHR`5MxSAAhrDW9tch2>
zV3C^>W1X8U-fp)fRbAd_NlM6iUsZro{|7#ht^{-53t}i?T2fy;tr
z){@b=fvW#5Jc`SfAC6)?e6x+wzd)08OJ?3IJJop*&%R``WQy04`Qq6~__C02|D=f^
zD0V}3qagpA21~!vsa8wBgx6~6pYV#RS@cF`gLI1r
zh|9L^T*+n_pFv#WGg%qpXC(bL5(c5#_#kx$CyIU;v9om-h=cOK8lv;gXF#UEco)~_E-idT(lKl)Jd?1
z!xzd$B9t3o8Z7gmsss0ptR%}2=XywDY%K`J+bu(5-_jeEqlWixH0S^9whT?YPH*)Y
z$(CW8FKMBa(*U45lP$Tf*{zn`#CnZmoj8@<%y!0I%hD;S}KQuD%H#;n&Hec3c8C`yMn`MkqWwne+9D;1PgWf8h
zk5tr8$y@2itz(mk>8%|YnrO2e&=pKU%Vc2q=e0wwuVZ9bhaiag?yj?p^=xuk#wN0B
zLC{WM{oN}*%gQ2#CtmbBU|2X4NFs{2PgFIns*1+2-W%BLxogN%MH0BUuk
z<*RynZ4S#pi5z-e6;{hQV;^vheI`rz3G}+<2Cd}zcFVX{Ih^Dl2sHC$6a?l|ihkZ;
zIoLImj=+n_mV*-+lxoJ|pmUlXYe`+O%m+cib*PHG=dp};zbjMYsWzNmipkKVR;;zu
zS;i+?a3L@QrNJILoB=D=2AGyZjKi#!LlPD=mp=7a_5pFcn|z
zr{nNcWcPMj4vlZIu_Ic+QZ~B-@&K9xRW{3^i3plYex{nY5`6LM4$B0;JlVv%>KX2H
znjp*UmI<3*hRV;onEsNS@RK$n~!)cN489IS&lM|76xjz$8uE5beH95V?eEC
zvVSh3SS|FvU`_^TJw7Wul;k@s%~({7TX`36l3(2C8P&S*rZ0
zwU(+xKNLh|t;=_|(>ugu`Yp3ll>=^IQ5~g1+VV@K?VMz+_4A_OQA+ujVJ&}Q&{7=<
zqs4obhw3ehAvdEc{{iWJuo1EzX6ItNrMhLbmGM;_tQPGyMm3vo27Iu@-hn0djswlr
z4MeTQ2j*L;nzwQTUL_lS)u?t$jj@o9s*jo=9q<`uo291Z&RUG;(B^%+-ZH}*bEEXF
zEtvca^pVFpr39A9MoO+*TUiP|1lcME?E{MEsDR?&x^1ihFGo4jn0_qEo*$H`-R4IHNvBD3e
z<8z`>_$$>SM=@kL){4rJEvlBbO|TSg>xO)f(qbfO96Ds7iimq=^@OW7bWmpRc=wghWsD>EQ#
z5G#9x{j%(FOqM;H2T8;r9jGeXvJ|qNu$S%H1=&Ze%0B7``S%tf!t$*!|0r$0T+Ilo
zYazBcBo*26jOCC47%CrH067x%`(~4nLb@In|gBUo6KCIXD?r
zi`__k*lIZ~+EZ&eEwPri(1<3>>8?9nD3CMtaGp(-lgnUzm)84as2_t2Iqr{e5M5)D
z37KG3Dy^Fc=@$xe@%DFS<44*G?-@*re#+2BrV|qPX&@~=6X5x)K~+-yBJ0(lfw=Gx
z9f5~@0D1*4J+(dLWMwx~=(bccC!c`n_k#t$(BbkD67y_e#9ksU`V~&>7e=AR!yvnE
z>m*F0GAY^W;ad@e+<{o{tBN70cRzw5Pd2^&30^>|HgOu?ToFL#zebzB?e>Hi54q#+
zy{Fc<(*>t3Cc4Wzuq%tX@dc{fho+&=AeHrfVsII7-B+SYUt$I1NL+O@EEOG;0vDjq
zWhHc#Pi(<+d2laGrZ@gz0KU_E(6W>cL*A#9#jaybJdCAOKS;Ra@D-g5i|KEI<(WFL
zbf7AhpiQ5Az4EB#m!qUU@CBMts*hmNT_7!dnb7GyIl2%9o)qF|j3MQFS9D2%KaIXT
z|LTF)gRB#zHyLyaNJjLvxi(VO%%iPI`*TD0{z;$QGxTLsU;1c1*N+6pD(~(zi{)^M
zuGp|lZJ<;@iW-kW@*W}e47t)XhEOs&CtB+tfJMC$R4V(|b|FL~2^}~iw^lkXRVTSQ$z10kOa7Sz^9KNTK7^{k#jt37{U!BsmZXF4)nrPO>nNSo
zt}+rbIqlgQanZ;v3Lazt;jFBle
z^nkjnlPC?6anqUe93bO*rIF`1*C><3bO_4`I>c^P@E+oe8wa|O
zXsp{GKW(i~R;NH8vyx}sq=T`c-ujQVzWLTC))z?~KmB;)lEZOM1IhZwf}GR#&Pp{n
zdV;?qGZwf
zpS=Z}Fkba*GI72gyZXb9sJv&7Z9u&ibB|_rSL_V3RXzzm^HV=1d1%$Xd1k<@*B9qp
zR5{OQiuWa^_XU`HmzyPwzFu?krghRLv8sLykK(r?rn9Ny+(s6Kw<5GTQXoYvmhqoq
zDZ3vmMk|3ZatN6FFa|zQ(*tHnJww(%{rFJW1GE05D%h38F8&@-^RY6hs+kbO#wZkA
zL`=gc0n+%amA$7QM1Mcz+$SZ;tN2DdsyG?GYIhO}TI=YLS86&q#lxu17=>G<$5Sdf
zCzFHHD_x)pgFat2pbB{nShK!Wjo7SLr01n~G$D3BNry)knZhj~^%Uc!?+B`{G0XB~X!O7%`iMVpNn$hD?-3+VN$U+=GU
z_}rlKonk^3K+0NatCYKI=MqTKiFJhOxF$7H&qniUdP^Rs%{UoiPLt|RNcBNPqs)^8
z=FbAySU_9;#5y&yU1H#zM29h$Hs@gC5AZO@`oEbQp5M
z9(e`XXgBC}Mil3EDr-XT>`$8@7=nb;Y(-Yz2kV5|GfnkSZlc!I{wv`Orh#2jr>gCC$tm$v^WW
zdP5JP%5^;?{xxEe#dW0eEF{Vvrpj#!TI4px$f3hu)=FExiOGLKf<09AmfD-FZFO|a
zg5+PB1XFu$j!E~SBTsayXB72h*L7g(&n6Nr6jvS?`@-8
z8yYQA9RN|XsjCxcQ!<&9z)%8135*nh(YuWa3?(p91%?tBDf^^NU?d3)B`}o0Py!=a
zU?_p11cpUm=uJr-CLIDp2@EAL{^x3@%$EPBH(4!0fh>o5Ym+tS0If;e8v#YC3UZoYkE^EJoZ1b+L}9AOkM`j=LnZvm+O}_
z^0uXSfj3%&Mb0Hod4XbBTObl4Pk*WIgM_}XRR>mi+??{Rr9i059xMrqP?_EjW^B#M($PERaG$S6xBPEj&5&Y9|O2v!z@JvR%d7LZnY+f
z>fu(1Yy?UIAxFR7ji6vWb_+h`*?I3g#CrF58MnT3tPIqlZ!PmF>yppyIG6?*adNks
z;45xY6ZU@QCmK8&9Tqt@NQiA+>hL=bXJPCoN0N;3eom1CL(017#>Hx4io8YcKfhZ|
zyvEl>b|(4M5&lX@$JqGTc?h^4Q8blU_&in<+2SVYC}I|
zWCkw>sXj?3AC;tz7NvjZ6PC$;>(F9qSqsbtAazpfgp-)|&Yr%B(>oVrWiY0=Abc-6
zb+c@I?x#?oN8c;Zy{_*ddcqpzv}qX5nY6iYwNjP6BK}0JI(C=ZN>!E--h@E1VbO03xJ;)s
zf-1MVAl*n+-{I-2tBT`^o+kt9n7u`dm38@34t1S!wqVhjz;q90Fr1Nmvu0AP~HoZ(JWK4@3b`XC9)|;PI%g_
ztg~SmnT=R)i1oIJTh<17G?HEIxT_C^CG$p-L7#&3Oi}X4BgA^k4xo*^kXM(tzS2f%
z56Lj3qlcV)z^YCdIh1u{G~b1*)KvgaT_wv1pEasM9Rt(}?YOG?JxMk913!Mq1OA{~
zR)O5awW?&vmrMvX4ns+4T%QaYF0=6SQp+-WE%$pNZ#O|cCD_SV0VAiIxn;?7kmHviJ6J}4@JIbo|HcYr2g4^TYw6Y*
zxIgsr6jhe}cm@l=-D#@q^;41P|6>mxWl~DLy%~H#rley{7Y-`?$T^-;(il2)dMCZs
zUs+dv5@Atzf%o0RUP}k_SY#~^QnK9R2bBZ2a@Ge?HJvuw*UZMW6KEUS%xDb?plZ~q
zOJP}#AN}PHASr;2n@bGz(emfLtwYE1Mv9NcABpJtpKSAEZ-CJyP20fb0R>6!%cj&(osh9oJyNPun;`OBk$Sw8B0fl=kZK__h0Jz9
z&TfLp7fe}}G=}z2oX%atxFtVGTkdwUZs@oHbc|?Z**uR?$f#aSaAY6sKbokiv6Awa
z!ZJucMe4T%l6DITtYp1%$PI4B#6^(f8J-M-fzK#PQ5Abe@tw(Os(2x1p1^rIN+V
z$>Q9-sj6~wvZ!)v!l0z(Z>M)p7?9VaW#mJWK6~g$k(>O~19w6`Xa?W&C>?PmNt4z<
z9;A0M$m#FoKz@ZUBfST*SW4^2oQ#d2?>}z`38LW39FjqT45TA-Jq~8Rv<(DlQOH2V
z`i)!)xv~(_%-wLmmB{vcAclf(Ff{Z!@b%k?f_}e%s{i@)X0-!yz(-`*pyeUR@5`9`
z(~uZ?1geH^L-VlDu3>e&7KsrT5Q7nqqblPCv}Ame2l+P=8Q;;7nM5@+)ep%)bLIpv
z=dMkGJPk`$8kmRvt6No7edbqH+b$#anTuHFl4}=X@*mw)A7vO7ADyX`FBxCEL0j^*
zX{tKjYG=7_&E&)HcjKxi?^70!?5|N^9g@bFf4_|j@LSXI=%0w$7A;XThDA_nU5)yo
zD`DyXF0t>Aan=_fSOjENza|F$5hRjcM>gwH&}UtVTUiU?lLOz37Z>Ad*6k^fwM@T}
zlbcy6t|IJ76VSo|P@NK)zeJs~dqoj0d`U-6S1QAE9O?(2f)>licrPIb*q`
z1sBdE*tTn4#Kv0M?3E0>zO6{Vf`QLC4;YRkftK622+ftGZT1{OGGcfa3)wRPB;KX!
z7=x>h>LrkQFgxzWuHz|K9D87KybSu3abd)MB0?FPJ0V}BF)}_)_0Q^YEG5Kz}0W2lk+T*xF?>S}WA-
zx4N3~P0nj+{b13Tah6YTHM@}DTz^bhomzEd4=aB(s7~!Z)sNHX3W66Uew%#Q-GRRJ
zWHAo<)3RSBm9yWJs%&@JUoKat-ElNSoAZbmn%<(UwJS|^Vo`ICIw{b#NKG$$E2>W3
zd+v7S3_jLRxr*-UQRYjv%ay0-$f)wZG;eF~L4?ytYVSeBU$t|o3M9VkQNhiYpbEWp
zctAy}uSr$WSK7K&-i#x`r_YHaQ<;q;&mDmAyNFfJLNYsN5^Xv2NUo7Df^<{~NJm|b
z3q#s6h=T!N<`!NB^xs5vU>zNUJ|=28JzI&|3A7D+<0hOA4kN@#uOWNrD#&rXdm1H2
zgHd}}P}G7|DjwU^t@3YsW|1l^s83ZzFKt<*iW7AKb>g0GMr)W{xTK!8T%GjS>U61|
z92iEH{K2A~F$(E0x}~G<4E$FX=U-L1q-tta|4YIxO5IxNUsZWT*85*z_(a0@|HAN#
zY~cU38x$>l4u<|(4NEorUrbqm4T;4Zuz
z+yxc^X}x^`n~3FoZnqY_!c0*kWZC?Tbytfij&I5)YiIW3DXdMOLy~W)hh=CZ_~d-m
zU)~gEtwE`!nI%ZcO|<1r?CKl!%rD>@9V8@LmDZf%LcNktsx0yrz*-MccO?;s-=x~e
z9*`mrQ#muNOxT}Ug0d#EXsHWfk%wTV56ALQz5n`2p1}3nFK3aIjzz5ZnO&BD{XD22
z_Vz#2w9+fj_WD6gAN>1R^h*n0j(dM^Uq_#WN-^n`=cA-)c&Tp}inz{Perv1IW
zpF_*FO`oiOP1$=VXta_W?$pdab`~Q|GiTW5{wIYE
zznK~!@7@P&@RNUOoh)-tr!gM#y2~mDX6wEcn7rA?$P_Z5a%8tIYNC{}nvRs+3`O?W
zZR*_m<=fSHW^S0aYiS#tLRvTu3@{C(G<=3gTuKt!zGv$lWMISgElP*~TdYlmFY9G`
zt&_lFlRZ@5@#v$F?K`T22hou>wg;BU!aSJV=>N8t(#a7BXBsukw;jIU*y;Lpder%O
zx6&q8LuxR;*H!%$&Sd1V@{drkSk|D=aSkMdjnukw7cAN3NMxT*?+`a%EDVWqaJ4=p
z@e`uoj!5aeL=FR`lT}ZR!5fpnTt-G%jAS8bk(aE>{v{8_ix0%^pm}Umw(l9sl`~l=
zZp&*{7wq^MllmpAzUI^V^zo?QbxT-UxEp~m3i3kLdfnPils?2g*TNs%>cS;oEMf$I
z>0pw4!}1{SH2;)Cc6pB1AxV-O>jCn?-=FeX61Xohd*3ujmgt%C*Xyi5@kN)xqMzus
zguYe#-%59jzG+F^B#`YFFZr^874tz8%yS4+pO{mw;QCDJYlmX_(zEYgUL)=Oz(TH1
zWC~qkd`rpNATf|R(Rfd*Ebz~*I60)A@z^e>zOY7
z4@Dvib=y0aEU|Cr8k4&|2
z@5yOMd}f7D1|`V>p9VhU{TLmxcIthrcjecMs$8D+z2WV<75c9clEu7lRQr-kK2gva
zt8(rELBVDg8J#&*Elk~N`65{d*0~e(;a37&kG;mE8}Qob{rXbQCGT^thg8`(qYqov
z!IB~F_gxcICG{s(`y9OAl9!Gyd1B5#j7J{#8BP1W>>Zw`F+T8&M(*o;OnbjvTgQ#o
z-V{}N{~~I+4dRD>_b!tpcM=2eC%5#(`RlSCm(5FGE?Z+Ju|FA!KiqH3l^@Fu;j-($
zYti}qN80KT>#f82I>z-Lr6+dZ-E}Xt(f{d9+Q7K!UFh#1|6tKOaY}wbMj^!L%l7`a
zsjGja@(1Sqn&
z!YfnxNKt=5r|(%SP9h_!ekLcYKZLLPQ>r!hBQ|3oRW*Uw_l3Q84f~tZhrq1Au^FgH
zV8;{WpZz_Y);HE^cXyGUqd=APhL^J>)#(E7|aq`y*|sVHGYRU<%{?#*)=M6(W{MDb-MAl1bq5e
z_If;Aikl*=PO3+lZAm#gBk
zp?1dOkpCFh-?tan!Q#0JyRvI2?;_$-IR^y#gW^AqTURG!fht}O^YG(gc6e=Q)<-`E
z;2PItdV2?c3|iv3Xwl!?{c#;_UG~1Y-%CB|dY5n(yp>wBZY#PEXnHHs=div+(!oiV
z4;|FJG&Cc2trpYC(8s67*3uidGGzLT07=KZmr+`f2dc8)>Che-b6u3=)urA8YHr0D
zz3ys5zGIr{mA5kbU56pp%!atHt@Vz1K`m)3-;_It(A&F49N2;ueVsqB6$zO=6GQ5rj`oHX1^O3rf10%;L8?c~_cXoyf(HqmFY;-fzTMZ~t@rwt?1I^0g~-Q2
zYN5%RJ&SJ}bDmkX-z|-Pz@v-kJ)H3v_TZBJ2P#LpgHjQLocnL)WzxMpjLaO4bhZbV
z((&&g2hs_|K!0;G;89Xa-{K@qLyP|K#IUzu=&58Btl++B`8Pb?jGW%cRBPR;t`B|A
ztkr=XvMt0~{PYra{jOm?bwmEJpt|Ah6Y2rv$L^>`Y>b*pW?KunaTxptb|u6&23{sY
zshf!IfE|>q&mxg)Fh=RmHc_nyc-ppfbz@>Rv$p@y@C}k*S>=>M(ppFmNB34Vy9tR*
z`J)jAFn^%r#T2+-e)`Z0uL
z!0aw{bCTgvH#hW0>41Fzvc&PtZVRbfLNytPh3LrKhevXhROxb^m1C`Aec6q^+QF!j
z@AYnZeWuto9O|~%8b4U%sWR;v#v|j*9(CI*0ot->ap~0|%YHl=
z^m2Xow^CnqgK@~^9H?_`px(c3F;ocd;?UI6+&*Z$M;Dp&G-TfUwZ
zePX%$2yD)LLYy;&;Ea?t%ykfvtddi`kw)tILIFzh{efrI7T(E~Gv*Pw^EJ|IVNMWi
z@4vXA@?6is#y5*^dOf#Xi^-?xbv{M>s*Vf8vLl_2%67yySXs2bqBnmOvWC3SieAzI
z^Vgj8iw&5oJ|}916+UBX3(PGkl;o3#!bwOJUBZ#BXzJDIOSsYeNfCBuW73dqv9y@X
zu9VkAm5%_j{B!(}zrv#og@WpH?UXL2sw1TTYL4-I3s~fSyz+Mu`#lbpXF=+Gmgg;J
zE7tTM!g2l#i%agG<&WMd*OBNeOrUSZm=yq@$hDkn9c_9wb1fm8^k1~`K0(QIZzdgw
z5m;j)&iMN8fDBv?IZNPHG9@cd$->nI#2{YBjH-5Ht+WJE)tV|UxR4!+UElZ&$OOV^
z$jjoGoW6`sO2#2V{#Zaw3uEcNSTIY(J*iEQ@jg(=dg@#av!{S8)_*dka5t`c7f0xj
z)1mJz7MW-W%ri2RP&x<&W-EgdzuAREvB>r?w4R-!^wV0zisH!S9fwUn;3qPbrTl1-RA13PJ(i9TbKVA
z8q0{U>FFT|0F`ipESS{vJtR1Q0l&3
zG#?0aVt#5(PfvWf+
zWJ&22$QoI+1?1OafxE02{{a$T!Jx!f;79xhsXhTQEG-y65s+q!h#gK<|AN;0xFqYF
zL@iiMc18oBGA?FwSUl|zY5bokmR?K*p=&4=gX9G
zONB2L9%U*dMW?dv17%wo`UfTFpcZ&ICqu25oT|{&^5TQHzuy
z5xJb+${XR!zk#;+;%o+QH_Xuwt|j#{cR{AEfLu*GjEGQ#Q~`5bx
z$gXL|q@AkK53&3cmO-Xt((AyZ{PUaWI2=_@xrsJzfhEvEsqhGXhgNtE16B9}zql#<
ziZLqufxs4TAjHK_5w%J;v8qlT3OV8i$aLNnRhQognNte6wghq?*MZf~y^LFS#A*hz
zM%0Wzwq^|atjv_VPfxadb
z^oJ$6l~J&ro5HmE7|o6|VM(6DoE}my!}A!iA2G+S*1r9-2hAs8Xh3%uiN|5Kufv-C
z5j$dONH}&ZR`&+pZ&3Gkf8VL@3+66T_jUY$KHGV0MJj5;%cDimkqUsp_F!pEqOe$27G*Dh~*18mh9Qg6~1M
zTEAsLntIrqSFaxKxv^b661u`W72?O>FI>X
zaX)i;z1rYCx=wA_n$)Ns^>egWS+~G4^bE+5c>#{jN3^O(ce^{(WBKz^)MKrwjXYrt
z^r^=^yhV9DF}g)Pv3WNkwx)OBbSkA3!>u-kXSb=1-S;-BC-vXANP54X(6RSCxn*So
z#wTLPvc4G`FQCu*nUDexfDZA3;3gaA@exEQYYR#TTuR3Pkg5S!BawTgiI<1^3FqtL
z)(WqB%6)2^dTMWJliK8-2LcB{RQ7xAYE#c)jp}K4qfI@%#q_CXO0Gvib{F~v%Ae~>
zI;W0D_zzOhcPp-rtVA~Hy9V{lYtuT_v%&iosb_bLY=>oJmwGOCo=-j3Gao=1Z_}m@
zwBdr>Y4tk*^!<)R>CkKG&A!yGo-aQXLn#}n+IhTDIX{EcbsDRgTMT*nZ(%V%0a@Qx
zO5sUMSRFH|7G$MEhAe^{C2dorP527pK}azpSWw9tR&dJ}$o*-oC2#hyjlIlTT=cGx
z52zLggp|NiT)qS{i&E(=luGYh3|Sw7Y)FGVCe@9U$}gg~Tz+U=k+mH%2$l-@)o;ah
zQhFJ_iq}#h@>p6S&&1V9^j3eq1@cV~>yZ2aw&ov8*vm4vsONjqo7D@!if;A77G5SM
z^zR@?9z-;XUnES1t(9f?Ei%lRLJ+;**$KqObOa-`g=Brn-%d5;V9ZM#os=HNWb|PS
zc_#TF6N#yBC#7JBQvSj1sCpIk?&c=cPiw=O_fZv_H(zFMaGO?-DCdt{-U2k2|ub%gsoJQdEG)
z_S)e4kf8{tQLXk#TRlp>uftMeN3%IUjp}?ahMuM){%=Y(x1%q3o`c?Z+JW1FmVjK4
zgiU(OFSoOOJV~&F*;osnOIvXfHljti8k_5%e3*8&!p;
zN_DOe&7U@7{2{air@$95Iw&2FaX0g_Hw8*xS`4!lmKdL!smKYiRA*z-lS8%0Y^3e=
zWMKTjgblpgN$6M|RkQ))#d8|4cCa0#d%6L1F$e;WdvWGgX^YTS6d)~%+@wWiJE#Kp
z&=J}Jmgp?1)%T#U`WE=g{{gDV0#!vJEoz25
z)CgI>h=4^pKp*#^S+djE*hRGgeFb)yi(hO9%V#Ladx9xhTniTSV>g=RU|k?Hs-mKY
zNKQt=Jdmnvf&95!#C+{Q(=)|;6q)FYR3hcp#@J+a-N4D!B7X`JO>i0qQhAYsorjfd1E7DyU|zvF8X5YQ}E~#s@_Bg
zN>2ozaUp!+C-9^EIgEQo(&qdX2R%b5xi=@HYKMnXA(1Tls0l+dc+MRt2);r{0uOqz
zn-7b#jgUmg6R`X}^t$B>$&wWSuei!V=?dENWFhlhZW7(6(Hz)qr@D=j^CFa1cvEp&
z-ou8r1qsuQsIU0J2fl5{>c5F9M-mluH$%$3*vM(zAGNTkH=oE6L?*u^ndfMO1k{r5fa}RQc;+32aP8>;@iI%yG2o
zWDWe7ferb?7r|$-8$1rp5f6r385j?&L?W<~j_AkObyk8UREo*q+02Tfn=xc;qt|(e
zn`*pgzccP>Fjw5%Ot9aEC3=#mf5uLj?uI$Y)}{*QHX`Ar!}kID%H`QFuoe=Ux`Ws+
zp;Ue^AWP*mTs0OF+C`YEWCR17ZD=`^-snD5g^r-A*QRJ8>I>Y!a80Kpcr9Au$D+P?
zlpT<7kwZok#D7u%@*?_z_crd2ee6;sH0H>vF3E^3^uE?fV*UVgNREyEq
zP1fR)!U*bbLX{!uUHqM!7~BoWz??4Jk{KWUm{Kqs5_l56NIUof8zJGB92n|IWz1(n
z+!5T0H((<;!A_iCZyO&;<5g5+5loNFPm>1Zjo5{=3w$hx0wK%LW>n4(N6!&ikAl4!1?`ADo$^r
z^d<^ineAw9MtxMyj`3+-$R3YuaV;!`mte>_HWkd*!xEei+`^|Q`Dgjb*lzUI+=iI@
z7?f5`LL#(*-U<)#^T<&=(gvSSW|OvGC7CO`%qR0EG;L;qP^pZuwtuw^O4
zLEh`m|ok@^7P+yzp5D@9C>bZxRyY|k^BmBZJch~OAbNuVv;^(+1~wq;GMdr1
zbrBM>pPTck7UeNoC6Btw>pUv#aSNXb2H8=*S`7~%B{MWx&Wd*$#Hy#dVL7OMKd;u?UK
zko?URvl+3-(iE9i?S!NgYtb)+&qbSgRUCw0!ldA*c29~to
zp`glJMZ#4{U~4uZk$*7?j3>OL&$lRb{|XlORL}lk=bdgxeGeq`5gooIKD2xab7>d_
z#Rp?IuK*WfE}o~OU)gYLCrYd3q~Q4#W|!RaN9QM_FQo^~Ly#yQD*DcBhFOj{RR_Qv
z-qaRWuFd?Cre!Y=+#$E(G=SJT^(q-9V~fAY(cXF
zLp5^7${5Qi7NUvrPw_%SJz^r@Co2n
zUQ1i>Edo~lvklMPkn&$>3oLF0^RILmr%?4wq*PJS0bE)CjPu(ehXBYt5<~g(F;wzU
zJs}QIEsbHLbSh%{jO2a>F=Gn~^x;lFOoht=0NN{>+mfuTTCo0N+omjWntAfNQ7pQ
z7J18M>P~D#Hf(3e-lt@~jSHcjXpXcZTNJdR^q~k)!Wm=Mi*F~?*@TPFyjhJ<^xTHZa(U(
z$h#2Zq%HtSW}Cm%Ltc>TBuqMgh3EkdIWSp$2dZ5CQDx3=NBuQ+lHn2Tnr9}1^y^02
zUP6KY9&A+KMs-HAozar#WdE%w&^uj8CctB7`7u5M=HQ5B$QDyA+>|1~z1Wark6ykC
zPeeAfh8V>DEhBRvr<=Tk&>Rr;(H~)s%cDX8H?BM*LWUz
zAWKWhOYj+sh`9S1FTJ~bsFK@?q8n`}m;+1oS9T(P4*2rVL5u$zY?R3F>4NV!Q~kCL
zlkY9b2@CE=?Uy7kn8>@HN4WGHsgA3KfIeej1Pw_#{
z=mtv|kMtg@2Tvcj3T}frD%l=B0OQdO5V`2)&s_jA_yL)19Ezbl*?P-YqN?b(c1SE0
z%r7uq0`q`tj6@=ujYq}2=Hg9jCw|OEvvDY66nYY9=27@jGQmN$!=auiy0VG;=K)yL
zpVb;q!B^nHMtB42L+4;Tn;xdA2R6h8V
zcLXYshpLL38j*Oyhf?`SJ0woWFG~i&F0pYK>Z|8@Q1BgqoCjgDSRW?z%5{q$eVM2)
zzYmkapqFY7F#Nl*Rv`DvHO)jQaBmaj0c^N`K|&8yNbVU*CtxTz6wM(?QC|lv{*!4d
z{RJ(B{je6RLpF9ZRp0aQ#bqbaS688bBH_QG0ezpOfWD@Ssc{Ew)$?d8`34E&0P*PT
zZpb~{tw$XgFU^CL-Ik7B*`3W)V3@6t&{`G+=fPMDy@g%9kw(u&m41t2?sO9?AHC6;
zSPS3nWq5X@IeHvv<~$g?p*Z@&_aj>|98!D$NIgx>_>qZ8x10ox3q17753u!(Txf1P
z=oc>{R!)=#S9YM~>aCF5xN8X|*hoV=QJ|0Zdg;?gwUP};lnlYR??WWaNe&cTgl1Ew
zY3VH53d~IOB~jJec2t4DOo-UQ^p?MlT|H1WMk;OVFs?7?{Cn{-w35u$Cw+6BovQ4d
z{uzk5U)h4v?*sUjfr9d9#X)(-@tgrEigcm$e0oE*Ot8YosphY5q$JNIt`8{{nc}Zp
zy%=YE8Px_@yptPHa4H>^OmIq$C*|ycGn&yXXxj
zFcjQ@$>3Ykt8XSwh#_`5w=??nfZltmq8=Rlz8M6AyHS5TC37MY?w&3>21d=2daYGP+wI_hkrHB#2!OJkEL-eeBsfU%$M)TqI)}Fk;8HH
zq-L^M{$QeK78BNU1$^c^01DMZsxC%L{=-NVji)2fhWgN*_z`nD{(P`5co9RfWw;eu
zj7NbLh((@3Ouq;!ksGe;zuK^?;c7ILbmiLFq;wW^vV-+$skI3`GhItp~INhO2-B5=&K@aBe&3DK8RRpLlf5SplZl03wTqA
z^G7z2HU&ZOA)ZIRh3H%2s&C<&kpxSHTm0CEf*Hd*QCf+$ScMO}*Egf@JmC6o6p(tf
z!nBo5b2GsTlfnE6eBtLHk?*llVPd!DtHs#3n6|tp9B4idKL4YTSZyPKR@4h2H%flR
z1>-~jh2Cg`$T_y?cVHMcO8Wgwi99L<#E?~20XeB1@4{F`wi95
zHdyo#F|rUt#baqR=Gw6A_kEqy0l{EZ)v
zF2`DEHxj{lpfa9-#9y}4A+Ja(d)iU;0l?$$!x!I;m)ehl-6?>SQ&r>|AE>sYG;lH=
z>EnLMI5q-#_ul)SDf+c|o{Ca^jMo7R$a8#fTmUVVXmM8JrLhs>k~qiC23qvYMyg|2
zfg^H#67AQH#9g$N$c2GB7v|8n*yw#@cNba;laLM0?_k`Lfm>k1ZtvTh1k4`U#%7G9
zE&g6A9kM=0SKx=XTd>wcNzx~@5LEi0S^l~gEiPF6n;|8e>cR2=h9VDu&q!`Y%Nm&V
z)1qD(3gy#heWe$Y*Rs{Cv8x|d^~VK;%*dT-oBwMZ@{
z^x-Ld1rqvYSW%d^@FfE1^bXpdZzgo<5OWDB@i8y=5pdT<|e_n72}mFZIFBE2tG-2g;GR)p*SO17e5$7HL}|q^5@M%9bTOF
zL-b3oV7b7Ug-O4-75cy8&O1t~Dr@xjR(Dl(qG=jvkSG`t6N8E(>a85QsybJ7ZgfUW
z7{=^i0y>e=0dp9Y?=y`f>gbFZrfHfE&<1ow5FK^U5i^L2+4tMKu#2gu^VVDIt?&J_
z*E#p>boSZzoOAa*VMQ45Bv#YNj;sM+n_PM&3x<-LizHmMcaKviOj!hzJb5X(UP`_%
z(-vm2M4ojclL7nehmZwaHrdEwWD1ad4yoO7srBRO`coDsOD!cWwNO2+3d%rF7PL%Q>DqX$uO%OB{;x4z*}WpD
zg346aVa}#c@B=-sFLsNQl-RH4)*c$_6M9IdCf1tpCwYgcu>s2+G#!m2FRh5Im~2shtaw{HWuWOY$;mGUW~_;^aw7tj6N
zGuhxG+uOVQ9Nb*HT*+inWKUH6|f6+!B@sxx~CK$Zqji>Ldp98Z>X8nLb((5G2B*f?no&ob(f
zVqld+k!JgJy9i3!jx^mBm0J0zswG`uprv*9yiNKheVO!KFk}OSDx~$$7gqNfsyTJf
zkGV68C2p^yt}xwn^A+7JFxwwH%O+bRa|l^I_%_$Tv$YDkPp6-j$
zzC;CC>(Ea+54BZVdw7ngG;Kq23MJJ`rMz7`qg0z`NE|)=4HbV$QK+VBXCw2cCR6*R
z1Zmdsgom|$eZhTbx2#RAnH_M3q)&<~r>eh?zwA#{RiVmTv1DWmO=dj^=fHNFwqjj?
zHy}<6dYWk?Jxbp@n*#6b1p@fGtC>kl>&@U+?tJhha_o`0isx=-zLw0^n_hsT{xu3B
zJ$%R4zYIlGcBSan_~43d!|&m(vU46I#IQkUW{dX9hTus(*i
zGuogyRP>YCVbukUZx!;bRyLyxF2mth`v{=o3EZdTbezGe$);fK0#aw8T3J7?;u%fd
ztU2iH?GMp4pM%J{A4TNvVBaA8^h$2e&0g%X4&<2
zp>D%(22qbOlqO^L-aQ|(x&qzk;qn7X(h!Y-8~;Ou@0LGB%FyK(P=v-@BKx%Xs&yal@iw>(ewI|S*C
z&{Yz<@$|yp(~Dg^yM(hR3`pQF-qVogy$Pnl1d2EIppf?k6!OY+rpgN93?bP2Lg*aW
z=ENEottWdqt>wz6W&AZzSZBf|a06D0#t@i@=Qo^v4WRTQtx1<&ilR`tCo9*8Ui
zuZRX{EaGgXngXFX)Q-sNZulJpD{sCZ85=jCUmwr(YT}X{Y^C=T*G}xCMn6K~&&&c0
zMq2U);%Pmkru715kP7O5ji;!BtHg)g02&0UK~CNdETIxlE!n`5JitR#Kk#%rK)V0n
z0W@RyNLcAjzdnyIqh0w;v
z7xQj)p`WGSA5*AS^sY8%zDZni5{&mo2eh(TT`vr+
z_)RZLQNZ^?_L$avLG&39*^n)IU9Zh-e3%vpLoo>O`}
z`~5201_KsCRzI9=JoR&A-^o^(Hca!O&<;}Wj20Ye6|HY=r9s`Yv^^-lT&l0hCcBwb
z`61{WSWC*<1G3(U>5&mT}M4m;q
z>Rd|WB?t;htL4|zv7QYIY(C&q>dZnH=axDgU+bNacqe~VAd#Au9=CYH$
z35u9K$m@48`su|l)>F{NuZApe(*kOA2f}sZS=ITFg-bV}Y14XW^FVR!hcwqU0QaTn
z+;lC^+z>QOg-;{;%}nT0Tb=@j>For01RYst?I&@HRG?7EE|M;
z>v&Uq>u6P<%oHlykx|+H68=1!ROD^g``dD0dKFo{tgv<%s=0G|C|!rIS#R^2)=LC3
zaT4sKp8>4A7`8fY5TG8zTAi4rY#4Y685RS#kxi_CU+r$H;F(1&gEe?lq#{6ez8wvQ
zg=x3jK-OykYnJ8VDtCma`5G))e3)c6QW}4v9q@8Dc(=Bq#)?d;^$vnc?}tH9E@^6P
zNBhKIeN^xS*z2`>B{w$X=r>}V#t>@g20HI|sG+yvg__W+em5wd5k1(d8rqs_Xyb>Y
znp^(3N8W@?udCDlQPv-k7T%0%!T2%?UR!`6V)#|##vtlu!>_oo4XqlnefYO1*_04X
zKSAfFeh2!!AnGn56&-`1^6D%=trIX?vU4ddpOXu}^=RO_6ycs8)CkCVRo+HKdV6Tb
zutd~dv$9cg6{%1iT-+aEt11b_dcSJNJ71@ztMYh%tA@OT+8~
zpZsJ9jiEGl1+2WYn(>2!5UEq*)KMNft=;DXRi^eOFJg%FDZ4~=^t&hZb@y|u^fOW!
zq^9R-p9RU2Oa>;~$}bHiIoTFn($r85^oon*%zWU}GVP;Kw}4xRxzSl-0Lkxw5jL_D
z=3)EeyWlJ#R=KHouZ)3Ny`Dm$FI{rqqC{fcRc_;r4)%Wi!*f4|aE|NV3-oJIPyf-A
zrHwPNxUC02826{e#+#kXXw~eG`;E75dj`+Rnnu=fKXP+iw8!ATd5IUa|pnVG`W*cueXYv}1$iM{*+2o!3
zDactna@ef&!SlvH^Cm51)77jfT5&|1ZRri5=sD5)mFk#4J@nX#)Pc`K9&JIc7eRJ;
zyY+DAs
z=yQA7P^RZF?_3P+aczK@fQJJZM`r8B95*$;i>!GJD7o_eJL*;Jl0TJac@xPd4;Ze>
z0$Gj_Hl!K#+_${`T##p?!I(EOY>@|kdSWBNNzhzVNDd>`aq?qK5cH`Z;
z+n~*?q?Q?1Ib#kfc&IR9(UVJYmImfGCeqC`1eqZ
zJOvxr)@B?3DxHOw*e7A8;b)>+!N-&iDPhZ}9xS{6kzq_C$H|(PcN+g%zc|-;FBIN@
zZ~RDs;{>=EhhmcaPH6QslKkN)o+sPB^h{aj1=$#O6wjP@sLc75rPkXzY{abNv&_mxf=K%?@EZ90>AM8)xU82WL6U%otzY53-zrXv*3YU49{_Y$ngw6O<1c
z1kbi?9iH)K6~hU-UGbREWzR
z#OxC0GxRDJy){ZBXeKf~PWDLBeK{^d7h@=?WUptq$Z8x;`#Z0MS
zRG=cQSJv=JvZEd7@RAWejooPHI}pH8ty0mqk!z&pQp=&wVqHUGEskAuUxQBTrWx`3
zuGdLv+loMeZ`4>*u
zxSr;5oeaOo8@RH!tr^-XvUM4x!k1x^fCXOS0{FRq?SsKQbn&>`7=+uPjjqNn@h=?=
za=(>=+3P`;-|p25(B4z*gY0t{R4gP_sXrLEI#I2X*Tr)TBIAcHKympZ@a3U><5hHw
zQ8_53Y8y>0Og9Bbi47{z&x#}5YJ@>@;PXJ?mG42%C^%PsNvUTX>Uze(+1-en
zxo>STc6J0inN|6Ej9tYiw^LdGO6}$Ap{)z=ZL*Mjnuq=_6vd*8g}=DYrYmsE@#gLm
zpMz`@GNMQNF!2omY*5n&mjlr+$R=f@+Q1sFS3DR(J}$~005lwoAlECTyt0O`Y!2DT
z-vNzJ@XR+J?S13hU~owmW;)vsa`s@dUk5PBE(dZSLz+8|Bl%=vT|0C>Rtr$zzY4j2
z`3DsK9F*Wis9XC~JAP1(R{k4l6#s27^~*cx-@4w|_1v%Up+F~Ucus(Q;A}hK6qp7Y
z{D2n8-U3xCT!M%9ckLXwFgsHU*4cz4v_%FwP@lG_TxeDa^
zJf79xolU9CJH7IQF}TzV-f!0fZl_W7I*`~ItyuR)K)eC1>L+)j)h-{xE5Hj>^f24(
z!A0w4hf#c$gUQ{86ja>{Mbm9Jd7kSh7K~w*-yduS~`ek#j?U
z%Mck^A*^nof{}aufcw@XXjeC>$9Pt=9yR>4z>Du6K-~+;)=h3UKCfCyY0LxDXmKwU
z97{n-26&AxQsBRqY*RgkXxd3>BtW*zPHFH8o&{cN$M&bRP!I)_Ue-qGD9Fmfw0^}&
zP*g1JgUdY>MBl{uqwfmXu@LZK9TACH7|lPjms+gaA)306e<^uvW5NJ?L@)$kyw~
z4TL}r&h18_TtLm4aIS6TS>m%rRBJrQ{)=G{m<|Z-4B$hDW4b^Wsj@YGxO@wjguH&P
zHIVrq%oO0j8t3GKcQ_)0E5UQ0nhou!Zl1Z@AzRH`5sy(z*9o9l6046Lj?NJY?KV9^
z*8e%F@>OWi_&5y83|h;-8M%SwWJC9L!_Px1x*FA@_ff&(qagGCD+-q)2V7PI8aIIA
z+O{5Z(uEsddBH-zqQ}(Pbx8)o>%T^|+D{P_ls~dAS>sjtvfcQ+qqc;`YiI*B1p%j_
zU&)o-#uvUTY@{B8y?z$x0XK;+o<~WUDa4}Pa5+B*yJYp@G#{c>br9nOFQXtjH4DSK
z0U_DHP<0ih9&r`x=m>S2hBSYx=-da2zdC4q(XnM7S^2#Z7k8|mv4HHCXli}bZhYw$
zv%9xp?D{7ut-N9hmA*8HpdW(9mmS0CT3mCRjXin~R(YV?*wax2zp?`{n0F-#S?`FT
z`R!QkUD2b2LMAb*cSorN}g^vzP4gitMcJ43~?;9Rdc99)yu-=
zMbwSI1YW2b4eEQ~5?stP_fj1w9k?VcC~9XQBkF3!E(w5-Il2+}zz&5(
zw&S~`pOk2Y^|lXlRzg<#UN5OqvMvdHxaUJwlkLO?V|i93qf6~t7&MNj1*3oU<6N&o
z7P*dR(Pv;9-R_l_iGo-!6qNytUG~>L-j8>vk?T1WFDVj`pcg1P70xARN_JZoVCQ-?
zcsd7)VIkui%cL^prD))K64SZF!`v4F%BLXG6Gxh_A%w_#ppD+c06B9q$R!^l(mxhf
z{;{BxhlFf81+^cMbzP07o|Vm%R(4Y$Tepg91h2E*_-1niR&k~VM*L1EH6PLk%0y6n
z2L+99%R-{0gi1;u4#H&;Dfj3Fc*Mg#GxZ{
zu<=X=Yil`uY}r%rs~;4mzJEdk?_|tS$u96G^Q+l^476j6s02mjW(`0j`9$QbK=~&G4(diJrCcE}{5i9^ffrv{i4w
zs_KIXU>CGipOcC=5$lUzL^gg8Oyd$6i$B;8=#bLq@&K=pN(@GBV(fCj1Obzu155!g
zQA0x|Zh=AKc0$yNw}t?A45bBa=v?i9LAA`+s`H?zE_?w{1cT}c=vO@##j6)d_6~$s
z-}^k^L8MjxO&Dxi3fNi-c!N+~wVdQ${Vp=9cP;_!7FK%%}JgjC8a%--pMm5a>+R&x8meSg5;ZobY
z2+%Ii7RLZLNQ%7_Kh)mAu}ZZM!L;@X;%2oHFs*%$1DomwEdh)e44AMCP$j8q$m$wF
zuDcvF)!mDrI@!8a_Yh51w|X()X~Fv&4OO>M7`!2++Xeg!&UGJ1>Bn%X&&dM}=2`vF
zT)=+N)=z|U{Xt6slW@ZN<6Z{1_0Zbwk#o*1u71~Qrsp`huo0>C+&02fffri)?FbeW(m
zMT4g6p9kCkS<|buOw*f4YuZ5rxNqRKS76Cnob=Q%x+ULdyogPU5VQ}n6lk{iXx_`f
z5rBP4pQTg_D&@m2A&RF;Qz_+~%BE6!2&K}do@pv2NK?vLRnt%I)OUO;lA4Nq8V6%U
z+ud|Q&O7PD^0)WW(;P-vqeDKg^-|p8ADVfV{QRe<(cgQn6W1|DT}-y@86TyoZ+7|e
zckf3Bko+mItbgwp7=tN*soZ8t2U;@DbQxW;RIgt#}%ww9C&^LmD~fbl!!bPvZi{LkQvIPXWHtTj$jy?BIPXO??F7D^sNjr7(s`*QS4
z#u$$PMQ$xKbjgi#B4nv+C5D`YQ)h8GkKyQffz+N5LCWs|BNpM=BU+ci<)R2cW04QQ
zCGWd7RFmI!gN7CZMq!OX;{;3;a1csrXj1PY(a@xxL7cxCopXQHjn$SyHtaS)foz1<
zTd|XI#|J6RsAhOd#U1l!wu2%u#sUU#BUffiMH%1~`k~F~S@=^Faf%q{Xpr~0g=!=<
zmpcHA!l9ztA-QID)e=}4^0%+B2#+Y7Dyd^zIbF!DM&5%rG
zDa?`RJmezUt!S<^UU?X4+mMm=S0?=k`T_PSHGX!TmP9mDeOPK`VRDt34-L|~Qcp9&
zRa)4&jy`b*AR<+@rey#mAeoA@CZnNqJq%voQ!W3Ulo|(-^}e4$c4#lN(kr~oullK0
zV1n{G-al+*aAlS%oA9&@l(
zrs^a3`A4F2UIbmrDxYx><{CcS6zEaHGwr5je(oq&KHE`cmi%H>~v@Dxz2
z5Uj#C0wTxeFb>KBi})WXXc|Oz(BmjH=*1q?_%9!`py}A&V@InoQ{n8J4q5CDCj;p?
zm|Cmt)VUuDn;j#@olw|j`mxJ5>yh@I&`M;-w+*ZLu7;w{3#&}=KV21(5Mwu92u0$n
zR`|&_-@sU6_d)Z$nYKJxDgM53;wiNfq&I$WBt06V--HhrNDs
zyXEIgnf#k5yF~CVqaeDZg}PnYiyvfBs&^SwJSG$);F8Egm+a#xb;);B>au}x1q=8w
z@<|R=5Ou?+plL|Hn4{$mL@uyn=NM*+#lQ<)(+~SEBWOAqO@m+b!1)~181Wsc8V6iz
z#%{#y6Djp)Bh8!LiXU7A=f-c)$~}yfF2Ib*7@)`I*Ni|~`9ahpaWAQ1`S6QexE_jA
z5m_l;ZIvT==Gj7{G_3bidI_LhzJSV~f-F=6@QO1Oe$mA<(aL(0XYNUG8F>}74P8Ef
zyav^m_fh%?J(A}_Y;TRF)6&D8>sNqWUWCrk4v=HrQt56eN*>Pu
zyyWp_DB|E*PB)$?>)TzEVOqA7XEnD9`wJj*wUBj97d$zgSI^`PB|>DI;F9<=OkJ;m
z=bIxGPO`z%0G3Iq_i2Fp5ojxZok78k6ojoVgvY%I+K+7c9uF3}jMC`iWGn2=07o~a
z7k2LL3TzuGuxVdh;YQlP#49!bGtagoQhMsN9?p^;F4d8RYGIZ7%aiOyTjwLKO61O7
zM?L-@RlGipYo^KXzm=VOff*f5$X|N8Uqk$`g?Z-4{pya@Sp~6#|
zR`i2x9SRsbhVCY<=U3$9|D*Bi=yQg9LB4%H?5Ci(s!{trGLpK8HS~~HUE`lxr$1c&
zx~T?f>km?cgEIX&;fj)GA^ioWKj2do+KuSoHGC_!(!X=6pQ5C?@O^&P=`bdtzogki
z*ucEC3)^p`@$|K7xLE8m8|NSVV=GhLv#_(aDooA&8vo2olAM62TtH?S%FnollzAuY
zmFd_7?~x%MG47T&+~;OI){wST{cI~e=BTQ#10YL76_)Up=@e&d6d8sLe0C2`sPmWH
zFDcExE{kV18O){L>o!Ntmv!MqLjZ>Kwt5B3;1f`{;Kljo=mIy|7p$bA#;_3F$dDr&ZVlbiptY1b&%>OBExCY&y>NcElN5pJ=n3PF9jzSl;
zsajGUB%~vb&bdv
zS#M$SUAMOi^3S?5N+Ty7^CKL|l8qtSsu{AU!LkdoV=Z7Dz_)829rsX7?AtY;XL1{b
zl_jMzc^dVDw7HL_urlY;OG)6eG{KNx`Z)!b!LzEZl5&`P2UjbDRNY(5ODx-3fEdSe
zy37gDn~>pse?3`gX=alhlQN`QdZ}EpZUc5M
zI*A5NHkr6oQl$L;Hbw)~l3JScY!;;ht(q~-qsZm~rJWS)KOAJ;k}zyqC$z&d01in>
z$E|O&-taa)=-+GIylY4edlC21(s4y>f4PNU0lE=}`@pf4GZTyFZKq$;i;>KuXb-Kz
z_)P&^&jxbFtqw#?%fX8~Y0JXwW?cCWgs0Uqy@E2)OhZZTX%LI+O-O;05T52pdXvCD
zuJ<*Uo~$U~z0~WP1`VQ(hVDnT(nk$(%rx5MwiX?Lq$xZr`j965kY@?~Rm#qzwxNbL
zN?EiW7pTBs+Qa31L(L0XvHg&ZP>jD2YiQq8@a_ie661{@F8?s>xu6XHJv~Fg>7=yh
zvdzPOj&CW=`J^4INq=RZSOT~P4>RSzf%aFKkAhcpS2y3L(rM+LLEVPQJZ$)(sB8QP
zUalPQHE1gu*=|s>
zTAd{M7px_3nUL4Y*n;V-
z{b8WPiONtfNe(~*Z)0-k6@N(%+oIn-!{^xfaFH4Xq+7S->nZ8k1CF%MbS71|v@gDC
zFwhGYleDLh)h;qzhD|Hqq#3Q<830jy?3_iUM5MkII#V6<9@$^CqBwVrX46`fUGy~)hulk5-RCD#Os!L$XHl@1-LC6=kCNSo)VNNtabWzuXw7FWIC;L5b
zqhEd?r`*xCimbjB8M3v^JArY~6N4;r7Jd-vaxz5a0vdKjNL2vhi_tzL2dukVgjNDQ
z(a*g!z!tJGxjrE(^K3m&UvG)g^;eNIdf+@$x&ny>c^`pGSi(T1UTUchQZrgpJc}=F
zVE~t|H0d-F0V*4e8akm$$cm4KI{_{S1=bEI8iN=oSiAuln>*n=5ry=|kg^|yX$dH?
zdr2AjEuSDz6!0pZ2)UE4O(|SwRAra#FDS|B3C|9f=vKz+N7Tjyx3`z
z@}#=56H}4t`poI-428B#T(7jnoS`n2*G%=tmvY6$_|y3FGyXgNTdKufpyP7#ApP9^wcD|VBFE?x*($^J9R(snKcTo>CHK+GDg64@a
z*>DZQiA?xJpm%K^HsPp|UQi=W4>ZfA6vhGBL3Hgxqe2C9`P
zZj%j6wi~F?(B5KFpG3`*Ivj5EWR|3vCwCm=Hczp}begAhoIKC8bn!5sGN#p0;Wk~|
z)MUCkk93=E%d&|Np*Xj+neNRp<8eRcF+J9|*pH~Q>FKy=9u+L4;m4G=nO@h6ou;>=
zu+#KeSvJ$xQSLVV7RO-uJLCgl)a6uPPe^1Bj2XZk%|O?I^UR>-hhOfNjb^aJVKYM=
zhuh3B)-}T&r*&ecRlR00Hw~G^9kp&V!sS6`r1x!`8KvhkqaC@OW{h{$jIBM!ZI)OW
zoo30}(~zcHLqjjDGfO?6xy{nGhtD(1+?Lxc>v+*-mNTZ9;X
zW=-f^K^-=Z_xx_E@SnA>
z-x}JB;(p#k=kYoeiM1^}lW{P`H2SKrlDW6%?+gAZl(
zD8*h4w$BtyW+*z#)f)Yq;)uM0Q8y)bpINThK>v8^Rl@ocNon=S1=Ze=`FKi@{*g$|
zX=HektZmWuHu^+AKxuU{_LJ2psAXtNF{m#mlwBd`q*mO_2$t9&DOs6RAzwC?TL4v?
z7&Yn+%@ROXZw%IZf>Rd*>jX`20Dn-{-8we*i&noaj<
zJTamnK<5XMEz)_^c+Aro8~3_EaytEtb8pBoMVtK9GgfG4WB>;IK>uEAXiZx`>8AsY
robrQ|owa>aoRhN_X(Mf!48lrA?l?kCzkiF4Q8iUAs#=ol_WAz{i*386
literal 0
HcmV?d00001
diff --git a/test/performance/vendor/golang.org/x/net/publicsuffix/data/text b/test/performance/vendor/golang.org/x/net/publicsuffix/data/text
new file mode 100644
index 000000000..124dcd61f
--- /dev/null
+++ b/test/performance/vendor/golang.org/x/net/publicsuffix/data/text
@@ -0,0 +1 @@
+billustrationionjukudoyamakeupowiathletajimageandsoundandvision-riopretobishimagentositecnologiabiocelotenkawabipanasonicatfoodnetworkinggroupperbirdartcenterprisecloudaccesscamdvrcampaniabirkenesoddtangenovarahkkeravjuegoshikikiraraholtalenishikatakazakindependent-revieweirbirthplaceu-1bitbucketrzynishikatsuragirlyuzawabitternidiscoverybjarkoybjerkreimdbaltimore-og-romsdalp1bjugnishikawazukamishihoronobeautydalwaysdatabaseballangenkainanaejrietisalatinabenogatabitorderblackfridaybloombergbauernishimerabloxcms3-website-us-west-2blushakotanishinomiyashironocparachutingjovikarateu-2bmoattachmentsalangenishinoomotegovtattoolforgerockartuzybmsalon-1bmwellbeingzoneu-3bnrwesteuropenairbusantiquesaltdalomzaporizhzhedmarkaratsuginamikatagamilanotairesistanceu-4bondigitaloceanspacesaludishangrilanciabonnishinoshimatsusakahoginankokubunjindianapolis-a-bloggerbookonlinewjerseyboomlahppiacenzachpomorskienishiokoppegardiskussionsbereichattanooganordkapparaglidinglassassinationalheritageu-north-1boschaefflerdalondonetskarelianceu-south-1bostik-serveronagasukevje-og-hornnesalvadordalibabalatinord-aurdalipaywhirlondrinaplesknsalzburgleezextraspace-to-rentalstomakomaibarabostonakijinsekikogentappssejnyaarparalleluxembourglitcheltenham-radio-opensocialorenskogliwicebotanicalgardeno-staginglobodoes-itcouldbeworldisrechtranakamurataiwanairforcechireadthedocsxeroxfinitybotanicgardenishitosashimizunaminamiawajikindianmarketinglogowestfalenishiwakindielddanuorrindigenamsskoganeindustriabotanyanagawallonieruchomoscienceandindustrynissandiegoddabouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-fullensakerrypropertiesamegawaboutiquebecommerce-shopselectaxihuanissayokkaichintaifun-dnsaliasamnangerboutireservditchyouriparasiteboyfriendoftheinternetflixjavaldaostathellevangerbozen-sudtirolottokorozawabozen-suedtirolouvreisenissedalovepoparisor-fronisshingucciprianiigataipeidsvollovesickariyakumodumeloyalistoragebplaceducatorprojectcmembersampalermomahaccapooguybrandywinevalleybrasiliadboxosa
scoli-picenorddalpusercontentcp4bresciaokinawashirosatobamagazineuesamsclubartowestus2brindisibenikitagataikikuchikumagayagawalmartgorybristoloseyouriparliamentjeldsundivtasvuodnakaniikawatanagurabritishcolumbialowiezaganiyodogawabroadcastlebtimnetzlgloomy-routerbroadwaybroke-itvedestrandivttasvuotnakanojohanamakindlefrakkestadiybrokerbrothermesaverdeatnulmemergencyachtsamsungloppennebrowsersafetymarketsandnessjoenl-ams-1brumunddalublindesnesandoybrunelastxn--0trq7p7nnbrusselsandvikcoromantovalle-daostavangerbruxellesanfranciscofreakunekobayashikaoirmemorialucaniabryanskodjedugit-pagespeedmobilizeroticagliaricoharuovatlassian-dev-builderscbglugsjcbnpparibashkiriabrynewmexicoacharterbuzzwfarmerseinebwhalingmbhartiffany-2bzhitomirbzzcodyn-vpndnsantacruzsantafedjeffersoncoffeedbackdropocznordlandrudupontariobranconavstackasaokamikoaniikappudownloadurbanamexhibitioncogretakamatsukawacollectioncolognewyorkshirebungoonordre-landurhamburgrimstadynamisches-dnsantamariakecolonialwilliamsburgripeeweeklylotterycoloradoplateaudnedalncolumbusheycommunexus-3community-prochowicecomobaravendbambleborkapsicilyonagoyauthgear-stagingivestbyglandroverhallair-traffic-controlleyombomloabaths-heilbronnoysunddnslivegarsheiheijibigawaustraliaustinnfshostrolekamisatokaizukameyamatotakadaustevollivornowtv-infolldalolipopmcdircompanychipstmncomparemarkerryhotelsantoandrepbodynaliasnesoddenmarkhangelskjakdnepropetrovskiervaapsteigenflfannefrankfurtjxn--12cfi8ixb8lutskashibatakashimarshallstatebankashiharacomsecaaskimitsubatamibuildingriwatarailwaycondoshichinohealth-carereformemsettlersanukindustriesteamfamberlevagangaviikanonjinfinitigotembaixadaconferenceconstructionconsuladogadollsaobernardomniweatherchanneluxuryconsultanthropologyconsultingroks-thisayamanobeokakegawacontactkmaxxn--12co0c3b4evalled-aostamayukinsuregruhostingrondarcontagematsubaravennaharimalborkashiwaracontemporaryarteducationalchikugodonnakaiwamizawashtenawsmppl-wawdev-myqnapcloudcontrolledogawarabikomaezakirunoopschlesisch
esaogoncartoonartdecologiacontractorskenconventureshinodearthickashiwazakiyosatokamachilloutsystemscloudsitecookingchannelsdvrdnsdojogaszkolancashirecifedexetercoolblogdnsfor-better-thanawassamukawatarikuzentakatairavpagecooperativano-frankivskygearapparochernigovernmentksatxn--1ck2e1bananarepublic-inquiryggeebinatsukigatajimidsundevelopmentatarantours3-external-1copenhagencyclopedichiropracticatholicaxiashorokanaiecoproductionsaotomeinforumzcorporationcorsicahcesuoloanswatch-and-clockercorvettenrissagaeroclubmedecincinnativeamericanantiquest-le-patron-k3sapporomuracosenzamamidorittoeigersundynathomebuiltwithdarkasserverrankoshigayaltakasugaintelligencecosidnshome-webservercellikescandypoppdaluzerncostumedicallynxn--1ctwolominamatargets-itlon-2couchpotatofriesardegnarutomobegetmyiparsardiniacouncilvivanovoldacouponsarlcozoracq-acranbrookuwanalyticsarpsborgrongausdalcrankyowariasahikawatchandclockasukabeauxartsandcraftsarufutsunomiyawakasaikaitabashijonawatecrdyndns-at-homedepotaruinterhostsolutionsasayamatta-varjjatmpartinternationalfirearmsaseboknowsitallcreditcardyndns-at-workshoppingrossetouchigasakitahiroshimansionsaskatchewancreditunioncremonashgabadaddjaguarqcxn--1lqs03ncrewhmessinarashinomutashinaintuitoyosatoyokawacricketnedalcrimeast-kazakhstanangercrotonecrownipartsassarinuyamashinazawacrsaudacruisesauheradyndns-blogsitextilegnicapetownnews-stagingroundhandlingroznycuisinellancasterculturalcentertainmentoyotapartysvardocuneocupcakecuritibabymilk3curvallee-d-aosteinkjerusalempresashibetsurugashimaringatlantajirinvestmentsavannahgacutegirlfriendyndns-freeboxoslocalzonecymrulvikasumigaurawa-mazowszexnetlifyinzairtrafficplexus-1cyonabarumesswithdnsaveincloudyndns-homednsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacyouthruherecipescaracaltanissettaishinomakilovecollegefantasyleaguernseyfembetsukumiyamazonawsglobalacceleratorahimeshimabaridagawatchesciencecentersciencehistoryfermockasuyamegurownproviderferraraferraris-a-catererferrerotikagoshi
malopolskanlandyndns-picsaxofetsundyndns-remotewdyndns-ipasadenaroyfgujoinvilleitungsenfhvalerfidontexistmein-iservschulegallocalhostrodawarafieldyndns-serverdalfigueresindevicenzaolkuszczytnoipirangalsaceofilateliafilegear-augustowhoswholdingsmall-webthingscientistordalfilegear-debianfilegear-gbizfilegear-iefilegear-jpmorganfilegear-sg-1filminamiechizenfinalfinancefineartscrapper-sitefinlandyndns-weblikes-piedmonticellocus-4finnoyfirebaseappaviancarrdyndns-wikinkobearalvahkijoetsuldalvdalaskanittedallasalleasecuritytacticschoenbrunnfirenetoystre-slidrettozawafirenzefirestonefirewebpaascrappingulenfirmdaleikangerfishingoldpoint2thisamitsukefitjarvodkafjordyndns-workangerfitnessettlementozsdellogliastradingunmanxn--1qqw23afjalerfldrvalleeaosteflekkefjordyndns1flesberguovdageaidnunjargaflickragerogerscrysecretrosnubar0flierneflirfloginlinefloppythonanywhereggio-calabriafloraflorencefloridatsunangojomedicinakamagayahabackplaneapplinzis-a-celticsfanfloripadoval-daostavalleyfloristanohatakahamalselvendrellflorokunohealthcareerscwienflowerservehalflifeinsurancefltrani-andria-barletta-trani-andriaflynnhosting-clusterfnchiryukyuragifuchungbukharanzanfndynnschokokekschokoladenfnwkaszubytemarkatowicefoolfor-ourfor-somedio-campidano-mediocampidanomediofor-theaterforexrothachijolsterforgotdnservehttpbin-butterforli-cesena-forlicesenaforlillesandefjordynservebbscholarshipschoolbusinessebyforsaleirfjordynuniversityforsandasuolodingenfortalfortefortmissoulangevagrigentomologyeonggiehtavuoatnagahamaroygardencowayfortworthachinoheavyfosneservehumourfotraniandriabarlettatraniandriafoxfordecampobassociatest-iserveblogsytemp-dnserveirchitachinakagawashingtondchernivtsiciliafozfr-par-1fr-par-2franamizuhobby-sitefrancaiseharafranziskanerimalvikatsushikabedzin-addrammenuorochesterfredrikstadtvserveminecraftranoyfreeddnsfreebox-oservemp3freedesktopfizerfreemasonryfreemyiphosteurovisionfreesitefreetlservep2pgfoggiafreiburgushikamifuranorfolkebibleksvikatsuyamarugame-hostyhostingxn--2m4a15ef
renchkisshikirkeneservepicservequakefreseniuscultureggio-emilia-romagnakasatsunairguardiannakadomarinebraskaunicommbankaufentigerfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesignfrognfrolandynv6from-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawakepnoorfrom-capitalonewportransipharmacienservicesevastopolefrom-coalfrom-ctranslatedynvpnpluscountryestateofdelawareclaimschoolsztynsettsupportoyotomiyazakis-a-candidatefrom-dchitosetodayfrom-dediboxafrom-flandersevenassisienarvikautokeinoticeablewismillerfrom-gaulardalfrom-hichisochikuzenfrom-iafrom-idyroyrvikingruenoharafrom-ilfrom-in-berlindasewiiheyaizuwakamatsubushikusakadogawafrom-ksharpharmacyshawaiijimarcheapartmentshellaspeziafrom-kyfrom-lanshimokawafrom-mamurogawatsonfrom-mdfrom-medizinhistorischeshimokitayamattelekommunikationfrom-mifunefrom-mnfrom-modalenfrom-mshimonitayanagit-reposts-and-telecommunicationshimonosekikawafrom-mtnfrom-nchofunatoriginstantcloudfrontdoorfrom-ndfrom-nefrom-nhktistoryfrom-njshimosuwalkis-a-chefarsundyndns-mailfrom-nminamifuranofrom-nvalleedaostefrom-nynysagamiharafrom-ohdattorelayfrom-oketogolffanshimotsukefrom-orfrom-padualstackazoologicalfrom-pratogurafrom-ris-a-conservativegashimotsumayfirstockholmestrandfrom-schmidtre-gauldalfrom-sdscloudfrom-tnfrom-txn--2scrj9chonanbunkyonanaoshimakanegasakikugawaltervistailscaleforcefrom-utsiracusaikirovogradoyfrom-vald-aostarostwodzislawildlifestylefrom-vtransportefrom-wafrom-wiardwebview-assetshinichinanfrom-wvanylvenneslaskerrylogisticshinjournalismartlabelingfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairkitapps-auction-rancherkasydneyfujinomiyadattowebhoptogakushimotoganefujiokayamandalfujisatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridedyn-berlincolnfujitsuruokazakiryuohkura
fujiyoshidavvenjargap-east-1fukayabeardubaiduckdnsncfdfukuchiyamadavvesiidappnodebalancertmgrazimutheworkpccwilliamhillfukudomigawafukuis-a-cpalacefukumitsubishigakisarazure-mobileirvikazteleportlligatransurlfukuokakamigaharafukuroishikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagatakaharunusualpersonfunabashiriuchinadafunagatakahashimamakisofukushimangonnakatombetsumy-gatewayfunahashikamiamakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamangyshlakasamatsudoomdnstracefuosskoczowinbar1furubirafurudonostiaafurukawajimaniwakuratefusodegaurafussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomihachimanagementrapaniizafutboldlygoingnowhere-for-morenakatsugawafuttsurutaharafuturecmshinjukumamotoyamashikefuturehostingfuturemailingfvghamurakamigoris-a-designerhandcraftedhandsonyhangglidinghangoutwentehannanmokuizumodenaklodzkochikuseihidorahannorthwesternmutualhanyuzenhapmircloudletshintokushimahappounzenharvestcelebrationhasamap-northeast-3hasaminami-alpshintomikasaharahashbangryhasudahasura-apphiladelphiaareadmyblogspotrdhasvikfh-muensterhatogayahoooshikamaishimofusartshinyoshitomiokamisunagawahatoyamazakitakatakanabeatshiojirishirifujiedahatsukaichikaiseiyoichimkentrendhostinghattfjelldalhayashimamotobusellfylkesbiblackbaudcdn-edgestackhero-networkisboringhazuminobushistoryhelplfinancialhelsinkitakyushuaiahembygdsforbundhemneshioyanaizuerichardlimanowarudahemsedalhepforgeblockshirahamatonbetsurgeonshalloffameiwamasoyheroyhetemlbfanhgtvaohigashiagatsumagoianiahigashichichibuskerudhigashihiroshimanehigashiizumozakitamigrationhigashikagawahigashikagurasoedahigashikawakitaaikitamotosunndalhigashikurumeeresinstaginghigashimatsushimarburghigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshirakokonoehigashinarusells-for-lesshiranukamitondabayashiogamagoriziahigashinehigashiomitamanortonsberghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodernhigashitsunosegawahigashiurausukitashiobarahigashiya
matokoriyamanashifteditorxn--30rr7yhigashiyodogawahigashiyoshinogaris-a-doctorhippyhiraizumisatohnoshoohirakatashinagawahiranairportland-4-salernogiessennanjobojis-a-financialadvisor-aurdalhirarahiratsukaerusrcfastlylbanzaicloudappspotagerhirayaitakaokalmykiahistorichouseshiraois-a-geekhakassiahitachiomiyagildeskaliszhitachiotagonohejis-a-greenhitraeumtgeradegreehjartdalhjelmelandholeckodairaholidayholyhomegoodshiraokamitsuehomeiphilatelyhomelinkyard-cloudjiffyresdalhomelinuxn--32vp30hachiojiyahikobierzycehomeofficehomesecuritymacaparecidahomesecuritypchoseikarugamvikarlsoyhomesenseeringhomesklepphilipsynology-diskstationhomeunixn--3bst00minamiiserniahondahongooglecodebergentinghonjyoitakarazukaluganskharkivaporcloudhornindalhorsells-for-ustkanmakiwielunnerhortendofinternet-dnshiratakahagitapphoenixn--3ds443ghospitalhoteleshishikuis-a-guruhotelwithflightshisognehotmailhoyangerhoylandetakasagophonefosshisuifuettertdasnetzhumanitieshitaramahungryhurdalhurumajis-a-hard-workershizukuishimogosenhyllestadhyogoris-a-hunterhyugawarahyundaiwafuneis-into-carsiiitesilkharkovaresearchaeologicalvinklein-the-bandairtelebitbridgestoneenebakkeshibechambagricultureadymadealstahaugesunderseaportsinfolionetworkdalaheadjudygarlandis-into-cartoonsimple-urlis-into-gamesserlillyis-leetrentin-suedtirolis-lostre-toteneis-a-lawyeris-not-certifiedis-savedis-slickhersonis-uberleetrentino-a-adigeis-very-badajozis-a-liberalis-very-evillageis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandovre-eikerisleofmanaustdaljellybeanjenv-arubahccavuotnagaragusabaerobaticketsirdaljeonnamerikawauejetztrentino-aadigejevnakershusdecorativeartslupskhmelnytskyivarggatrentino-alto-adigejewelryjewishartgalleryjfkhplaystation-cloudyclusterjgorajlljls-sto1jls-sto2jls-sto3jmphotographysiojnjaworznospamproxyjoyentrentino-altoadigejoyokaichibajddarchitecturealtorlandjpnjprslzjurkotohiradomainstitutekotourakouhokutamamurakounosupabasembokukizunokunimilitarykouyamarylhurstjordalshalsenkouzushimasfjordenko
zagawakozakis-a-llamarnardalkozowindowskrakowinnersnoasakatakkokamiminersokndalkpnkppspbarcelonagawakkanaibetsubamericanfamilyds3-fips-us-gov-west-1krasnikahokutokashikis-a-musiciankrasnodarkredstonekrelliankristiansandcatsolarssonkristiansundkrodsheradkrokstadelvalle-aostatic-accessolognekryminamiizukaminokawanishiaizubangekumanotteroykumatorinovecoregontrailroadkumejimashikis-a-nascarfankumenantokonamegatakatoris-a-nursells-itrentin-sud-tirolkunisakis-a-painteractivelvetrentin-sudtirolkunitachiaraindropilotsolundbecknx-serversellsyourhomeftphxn--3e0b707ekunitomigusukuleuvenetokigawakunneppuboliviajessheimpertrixcdn77-secureggioemiliaromagnamsosnowiechristiansburgminakamichiharakunstsammlungkunstunddesignkuokgroupimientaketomisatoolsomakurehabmerkurgankurobeeldengeluidkurogimimatakatsukis-a-patsfankuroisoftwarezzoologykuromatsunais-a-personaltrainerkuronkurotakikawasakis-a-photographerokussldkushirogawakustanais-a-playershiftcryptonomichigangwonkusupersalezajskomakiyosemitekutchanelkutnowruzhgorodeokuzumakis-a-republicanonoichinomiyakekvafjordkvalsundkvamscompute-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsomnatalkzmisakis-a-soxfanmisasaguris-a-studentalmisawamisconfusedmishimasudamissilemisugitokuyamatsumaebashikshacknetrentino-sued-tirolmitakeharamitourismilemitoyoakemiuramiyazurecontainerdpolicemiyotamatsukuris-a-teacherkassyno-dshowamjondalenmonstermontrealestatefarmequipmentrentino-suedtirolmonza-brianzapposor-odalmonza-e-della-brianzaptokyotangotpantheonsitemonzabrianzaramonzaebrianzamonzaedellabrianzamoonscalebookinghostedpictetrentinoa-adigemordoviamoriyamatsumotofukemoriyoshiminamiashigaramormonmouthachirogatakamoriokakudamatsuemoroyamatsunomortgagemoscowiosor-varangermoseushimodatemosjoenmoskenesorfoldmossorocabalena-devicesorreisahayakawakamiichikawamisatottoris-a-techietis-a-landscaperspectakasakitchenmosvikomatsushimarylandmoteginowaniihamatamakinoharamoviemovimientolgamozilla-iotrentinoaadigemtranbytomaritimekeepingmuginozawaonsensiosite
muikaminoyamaxunispacemukoebenhavnmulhouseoullensvanguardmunakatanemuncienciamuosattemupinbarclaycards3-sa-east-1murmanskomforbar2murotorcraftrentinoalto-adigemusashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-o-saurlandesortlandmutsuzawamy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoruminamimakis-a-rockstarachowicemydattolocalcertificationmyddnsgeekgalaxymydissentrentinos-tirolmydobissmarterthanyoumydrobofageologymydsoundcastronomy-vigorlicemyeffectrentinostirolmyfastly-terrariuminamiminowamyfirewalledreplittlestargardmyforuminamioguni5myfritzmyftpaccessouthcarolinaturalhistorymuseumcentermyhome-servermyjinomykolaivencloud66mymailermymediapchristmasakillucernemyokohamamatsudamypepinkommunalforbundmypetsouthwest1-uslivinghistorymyphotoshibalashovhadanorth-kazakhstanmypicturestaurantrentinosud-tirolmypsxn--3pxu8kommunemysecuritycamerakermyshopblocksowamyshopifymyspreadshopwarendalenugmythic-beastspectruminamisanrikubetsuppliesoomytis-a-bookkeepermaritimodspeedpartnermytuleap-partnersphinxn--41amyvnchromediatechnologymywirepaircraftingvollohmusashimurayamashikokuchuoplantationplantspjelkavikomorotsukagawaplatformsharis-a-therapistoiaplatter-appinokofuefukihaboromskogplatterpioneerplazaplcube-serversicherungplumbingoplurinacionalpodhalepodlasiellaktyubinskiptveterinairealmpmnpodzonepohlpoivronpokerpokrovskomvuxn--3hcrj9choyodobashichikashukujitawaraumalatvuopmicrosoftbankarmoypoliticarrierpolitiendapolkowicepoltavalle-d-aostaticspydebergpomorzeszowitdkongsbergponpesaro-urbino-pesarourbinopesaromasvuotnarusawapordenonepornporsangerporsangugeporsgrunnanyokoshibahikariwanumatakinouepoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrprimetelemarkongsvingerprincipeprivatizehealthinsuranceprofesionalprogressivestfoldpromombetsupplypropertyprotectionprotonetrentinosued-tirolprudentialpruszkowithgoogleapiszprvcyberprzeworskogpulawypunyufuelveruminamiuonumassa-carrara-massacarraramassabuyshousesopotrentino-sud-tirolpupugliapussycatering
ebuzentsujiiepvhadselfiphdfcbankazunoticiashinkamigototalpvtrentinosuedtirolpwchungnamdalseidsbergmodellingmxn--11b4c3dray-dnsupdaterpzqhaebaruericssongdalenviknakayamaoris-a-cubicle-slavellinodeobjectshinshinotsurfashionstorebaselburguidefinimamateramochizukimobetsumidatlantichirurgiens-dentistes-en-franceqldqotoyohashimotoshimatsuzakis-an-accountantshowtimelbourneqponiatowadaqslgbtrentinsud-tirolqualifioappippueblockbusternopilawaquickconnectrentinsudtirolquicksytesrhtrentinsued-tirolquipelementsrltunestuff-4-saletunkonsulatrobeebyteappigboatsmolaquilanxessmushcdn77-sslingturystykaniepcetuscanytushuissier-justicetuvalleaostaverntuxfamilytwmailvestvagoyvevelstadvibo-valentiavibovalentiavideovillastufftoread-booksnestorfjordvinnicasadelamonedagestangevinnytsiavipsinaappiwatevirginiavirtual-uservecounterstrikevirtualcloudvirtualservervirtualuserveexchangevirtuelvisakuhokksundviterbolognagasakikonaikawagoevivianvivolkenkundenvixn--42c2d9avlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavminanovologdanskonyveloftrentino-stirolvolvolkswagentstuttgartrentinsuedtirolvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiecircustomer-ocimmobilienwixsitewloclawekoobindalwmcloudwmflabsurnadalwoodsidelmenhorstabackyardsurreyworse-thandawowithyoutuberspacekitagawawpdevcloudwpenginepoweredwphostedmailwpmucdnpixolinodeusercontentrentinosudtirolwpmudevcdnaccessokanagawawritesthisblogoipizzawroclawiwatsukiyonoshiroomgwtcirclerkstagewtfastvps-serverisignwuozuwzmiuwajimaxn--4gbriminingxn--4it168dxn--4it797kooris-a-libertarianxn--4pvxs4allxn--54b7fta0ccivilaviationredumbrellajollamericanexpressexyxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49civilisationrenderxn--5rtq34koperviklabudhabikinokawachinaganoharamcocottempurlxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationthewifiatmallorcafederation-webspacexn--80aaa0cvacationsusonoxn--80adxhksuzakananiimiharuxn--80ao21axn--80aqecdr1axn--80asehdbarclays3-us-east-2xn--80
aswgxn--80aukraanghkembuchikujobservableusercontentrevisohughestripperxn--8dbq2axn--8ltr62koryokamikawanehonbetsuwanouchijiwadeliveryxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisenbahnxn--90a3academiamicable-modemoneyxn--90aeroportalabamagasakishimabaraffleentry-snowplowiczeladzxn--90aishobarakawaharaoxn--90amckinseyxn--90azhytomyrxn--9dbhblg6dietritonxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexcloudxn--asky-iraxn--aurskog-hland-jnbarefootballooningjerstadgcapebretonamicrolightingjesdalombardiadembroideryonagunicloudiherokuappanamasteiermarkaracoldwarszawauthgearappspacehosted-by-previderxn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsuzukanazawaxn--bck1b9a5dre4civilwarmiasadoesntexisteingeekarpaczest-a-la-maisondre-landrayddns5yxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurgeryxn--bjddar-ptargithubpreviewsaitohmannore-og-uvdalxn--blt-elabourxn--bmlo-graingerxn--bod-2naturalsciencesnaturellesuzukis-an-actorxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-acornxn--brum-voagatroandinosaureportrentoyonakagyokutoyakomaganexn--btsfjord-9zaxn--bulsan-sdtirol-nsbaremetalpha-myqnapcloud9guacuiababia-goracleaningitpagexlimoldell-ogliastraderxn--c1avgxn--c2br7gxn--c3s14mincomcastreserve-onlinexn--cck2b3bargainstances3-us-gov-west-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-an-actresshwindmillxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694barreaudiblebesbydgoszczecinemagnethnologyoriikaragandauthordalandroiddnss3-ap-southeast-2ix4432-balsan-suedtirolimiteddnskinggfakefurniturecreationavuotnaritakoelnayorovigotsukisosakitahatakahatakaishimoichinosekigaharaurskog-holandingitlaborxn--czrs0trogstadxn--czru2dxn--czrw28barrel-of-knowledgeappgafanquanpachicappacificurussiautomotivelandds3-ca-central-16-balsan-sudtiro
llagdenesnaaseinet-freaks3-ap-southeast-123websiteleaf-south-123webseiteckidsmynasushiobarackmazerbaijan-mayen-rootaribeiraogakibichuobiramusementdllpages3-ap-south-123sitewebhareidfjordvagsoyerhcloudd-dnsiskinkyolasiteastcoastaldefenceastus2038xn--d1acj3barrell-of-knowledgecomputerhistoryofscience-fictionfabricafjs3-us-west-1xn--d1alfaromeoxn--d1atromsakegawaxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmailukowhitesnow-dnsangohtawaramotoineppubtlsanjotelulubin-brbambinagisobetsuitagajoburgjerdrumcprequalifymein-vigorgebetsukuibmdeveloperauniteroizumizakinderoyomitanobninskanzakiyokawaraustrheimatunduhrennebulsan-suedtirololitapunk123kotisivultrobjectselinogradimo-siemenscaledekaascolipiceno-ipifony-1337xn--eckvdtc9dxn--efvn9svalbardunloppaderbornxn--efvy88hagakhanamigawaxn--ehqz56nxn--elqq16hagebostadxn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaerodromegallupaasdaburxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsvchurchaseljeepsondriodejaneirockyotobetsuliguriaxn--fiq64barsycenterprisesakievennodesadistcgrouplidlugolekagaminord-frontierxn--fiqs8sveioxn--fiqz9svelvikoninjambylxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbssvizzeraxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grandrapidsvn-repostorjcloud-ver-jpchowderxn--frna-woaraisaijosoyroroswedenxn--frya-hraxn--fzc2c9e2cleverappsannanxn--fzys8d69uvgmailxn--g2xx48clicketcloudcontrolapparmatsuuraxn--gckr3f0fauskedsmokorsetagayaseralingenoamishirasatogliattipschulserverxn--gecrj9clickrisinglesannohekinannestadraydnsanokaruizawaxn--ggaviika-8ya47haibarakitakamiizumisanofidelitysfjordxn--gildeskl-g0axn--givuotna-8yasakaiminatoyookaneyamazoexn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-an-anarchistoricalsocietysnesigdalxn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45br5cylxn--gnstigliefern-wobihirosakikamijimatsushigexn--h-2failxn--h1aeghair-surveillancexn--h1ahnxn--h1alizxn--h2breg3eveneswidnicasacampina
grandebungotakadaemongolianxn--h2brj9c8clinichippubetsuikilatironporterxn--h3cuzk1digickoseis-a-linux-usershoujis-a-knightpointtohoboleslawieconomiastalbanshizuokamogawaxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinewhampshirealtychyattorneyagawakuyabukihokumakogeniwaizumiotsurugimbalsfjordeportexaskoyabeagleboardetroitskypecorivneatonoshoes3-eu-west-3utilitiesquare7xn--hebda8basicserversaillesjabbottateshinanomachildrensgardenhlfanhsbc66xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-aptibleangaviikadenaamesjevuemielnoboribetsuckswidnikkolobrzegersundxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugithubusercontentromsojamisonxn--io0a7is-an-artistgstagexn--j1adpkomonotogawaxn--j1aefbsbxn--1lqs71dyndns-office-on-the-webhostingrpassagensavonarviikamiokameokamakurazakiwakunigamihamadaxn--j1ael8basilicataniautoscanadaeguambulancentralus-2xn--j1amhakatanorthflankddiamondshinshiroxn--j6w193gxn--jlq480n2rgxn--jlq61u9w7basketballfinanzgorzeleccodespotenzakopanewspaperxn--jlster-byasuokannamihokkaidopaaskvollxn--jrpeland-54axn--jvr189miniserversusakis-a-socialistg-builderxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45brj9cistrondheimperiaxn--koluokta-7ya57hakodatexn--kprw13dxn--kpry57dxn--kput3is-an-engineeringxn--krager-gyatominamibosogndalxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdevcloudfunctionsimplesitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanoyakagexn--kvnangen-k0axn--l-1fairwindswiebodzin-dslattuminamiyamashirokawanabeepilepsykkylvenicexn--l1accentureklamborghinikolaeventswinoujscienceandhistoryxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52batochigifts3-us-west-2xn--lesund-huaxn--lgbbat1ad8jdfaststackschulplattformetacentrumeteorappassenger-associationxn--lgrd-poacctrusteexn--lhppi-xqaxn--linds-pramericanartrve
stnestudioxn--lns-qlavagiskexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacliniquedapliexn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddeswisstpetersburgxn--mgb9awbfbx-ostrowwlkpmguitarschwarzgwangjuifminamidaitomanchesterxn--mgba3a3ejtrycloudflarevistaplestudynamic-dnsrvaroyxn--mgba3a4f16axn--mgba3a4fra1-deloittevaksdalxn--mgba7c0bbn0axn--mgbaakc7dvfstdlibestadxn--mgbaam7a8hakonexn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscordsays3-website-ap-northeast-1xn--mgbai9azgqp6jejuniperxn--mgbayh7gpalmaseratis-an-entertainerxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskosherbrookegawaxn--mgbqly7c0a67fbclintonkotsukubankarumaifarmsteadrobaknoluoktachikawakayamadridvallee-aosteroyxn--mgbqly7cvafr-1xn--mgbt3dhdxn--mgbtf8flapymntrysiljanxn--mgbtx2bauhauspostman-echocolatemasekd1xn--mgbx4cd0abbvieeexn--mix082fbxoschweizxn--mix891fedorainfraclouderaxn--mjndalen-64axn--mk0axin-vpnclothingdustdatadetectjmaxxxn--12c1fe0bradescotlandrrxn--mk1bu44cn-northwest-1xn--mkru45is-bykleclerchoshibuyachiyodancexn--mlatvuopmi-s4axn--mli-tlavangenxn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-certifiedxn--mosjen-eyawaraxn--mot-tlazioxn--mre-og-romsdal-qqbuseranishiaritakurashikis-foundationxn--msy-ula0hakubaghdadultravelchannelxn--mtta-vrjjat-k7aflakstadaokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1minisitexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45q11citadelhicampinashikiminohostfoldnavyxn--nit225koshimizumakiyosunnydayxn--nmesjevuemie-tcbalestrandabergamoarekeymachineustarnbergxn--nnx388axn--nodessakyotanabelaudiopsysynology-dstreamlitappittsburghofficialxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveftplanetariuminamitanexn--nvuotna-hwaxn--nyqy26axn--o1achernihivgubsxn--o3cw4hakuis-a-democratravelersinsurancexn--o3cyx2axn--od0algxn--od0aq3belementorayoshiokanumazuryukuhashimojibxos3-website-ap-southeast-1xn--ogbpf8flatangerxn--oppegrd
-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--otu796dxn--p1acfedorapeoplegoismailillehammerfeste-ipatriaxn--p1ais-gonexn--pgbs0dhlx3xn--porsgu-sta26fedoraprojectoyotsukaidoxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cngreaterxn--qcka1pmcpenzaporizhzhiaxn--qqqt11minnesotaketakayamassivegridxn--qxa6axn--qxamsterdamnserverbaniaxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabaris-into-animeetrentin-sued-tirolxn--rennesy-v1axn--rhkkervju-01afeiraquarelleasingujaratoyouraxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hakusanagochihayaakasakawaiishopitsitexn--rovu88bellevuelosangeles3-website-ap-southeast-2xn--rros-granvindafjordxn--rskog-uuaxn--rst-0naturhistorischesxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithaldenxn--s9brj9cnpyatigorskolecznagatorodoyxn--sandnessjen-ogbellunord-odalombardyn53xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4dbgdty6citichernovtsyncloudrangedaluccarbonia-iglesias-carboniaiglesiascarboniaxn--skierv-utazasxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5natuurwetenschappenginexn--slt-elabcieszynh-servebeero-stageiseiroumuenchencoreapigeelvinckoshunantankmpspawnextdirectrentino-s-tirolxn--smla-hraxn--smna-gratangentlentapisa-geekosugexn--snase-nraxn--sndre-land-0cbeneventochiokinoshimaintenancebinordreisa-hockeynutazurestaticappspaceusercontentateyamaveroykenglandeltaitogitsumitakagiizeasypanelblagrarchaeologyeongbuk0emmafann-arboretumbriamallamaceiobbcg123homepagefrontappchizip61123minsidaarborteaches-yogasawaracingroks-theatree123hjemmesidealerimo-i-rana4u2-localhistorybolzano-altoadigeometre-experts-comptables3-ap-northeast-123miwebcambridgehirn4t3l3p0rtarumizusawabogadobeaemcloud-fr123paginaweberkeleyokosukanrabruzzombieidskoguchikushinonsenasakuchinotsuchiurakawafaicloudineat-url-o-g-i-naval-d-aosta-valleyokote164-b-datacentermezproxyzgoraetnabudejjudaicadaquest-mon-
blogueurodirumaceratabuseating-organicbcn-north-123saitamakawabartheshopencraftrainingdyniajuedischesapeakebayernavigationavoi234lima-cityeats3-ap-northeast-20001wwwedeployokozeastasiamunemurorangecloudplatform0xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyurihonjournalistjohnikonanporovnobserverxn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbeppublishproxyusuharavocatanzarowegroweiboltashkentatamotorsitestingivingjemnes3-eu-central-1kappleadpages-12hpalmspringsakerxn--stre-toten-zcbeskidyn-ip24xn--t60b56axn--tckweddingxn--tiq49xqyjelasticbeanstalkhmelnitskiyamarumorimachidaxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbestbuyshoparenagareyamaizurugbyenvironmentalconservationflashdrivefsnillfjordiscordsezjampaleoceanographics3-website-eu-west-1xn--trentin-sdtirol-7vbetainaboxfuseekloges3-website-sa-east-1xn--trentino-sd-tirol-c3bhzcasertainaioirasebastopologyeongnamegawafflecellclstagemologicaliforniavoues3-eu-west-1xn--trentino-sdtirol-szbielawalbrzycharitypedreamhostersvp4xn--trentinosd-tirol-rzbiellaakesvuemieleccebizenakanotoddeninoheguriitatebayashiibahcavuotnagaivuotnagaokakyotambabybluebitelevisioncilla-speziaxarnetbank8s3-eu-west-2xn--trentinosdtirol-7vbieszczadygeyachimataijiiyamanouchikuhokuryugasakitaurayasudaxn--trentinsd-tirol-6vbievat-band-campaignieznombrendlyngengerdalces3-website-us-east-1xn--trentinsdtirol-nsbifukagawalesundiscountypeformelhusgardeninomiyakonojorpelandiscourses3-website-us-west-1xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvestre-slidrexn--uc0ay4axn--uist22halsakakinokiaxn--uisz3gxn--unjrga-rtarnobrzegyptianxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtularvikonskowolayangroupiemontexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccerxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbigvalledaostaoba
omoriguchiharag-cloud-championshiphoplixboxenirasakincheonishiazaindependent-commissionishigouvicasinordeste-idclkarasjohkamikitayamatsurindependent-inquest-a-la-masionishiharaxn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402cnsantabarbaraxn--vhquvestre-totennishiawakuraxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861biharstadotsubetsugaruhrxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cntjomeldaluroyxn--wgbl6axn--xhq521bihorologyusuisservegame-serverxn--xkc2al3hye2axn--xkc2dl3a5ee0hammarfeastafricaravantaaxn--y9a3aquariumintereitrentino-sudtirolxn--yer-znaumburgxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4dbrk0cexn--ystre-slidre-ujbikedaejeonbukarasjokarasuyamarriottatsunoceanographiquehimejindependent-inquiryuufcfanishiizunazukindependent-panelomoliseminemrxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bilbaogashimadachicagoboavistanbulsan-sudtirolbia-tempio-olbiatempioolbialystokkeliwebredirectme-south-1xnbayxz
\ No newline at end of file
diff --git a/test/performance/vendor/golang.org/x/net/publicsuffix/list.go b/test/performance/vendor/golang.org/x/net/publicsuffix/list.go
new file mode 100644
index 000000000..d56e9e762
--- /dev/null
+++ b/test/performance/vendor/golang.org/x/net/publicsuffix/list.go
@@ -0,0 +1,203 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package publicsuffix provides a public suffix list based on data from
+// https://publicsuffix.org/
+//
+// A public suffix is one under which Internet users can directly register
+// names. It is related to, but different from, a TLD (top level domain).
+//
+// "com" is a TLD (top level domain). Top level means it has no dots.
+//
+// "com" is also a public suffix. Amazon and Google have registered different
+// siblings under that domain: "amazon.com" and "google.com".
+//
+// "au" is another TLD, again because it has no dots. But it's not "amazon.au".
+// Instead, it's "amazon.com.au".
+//
+// "com.au" isn't an actual TLD, because it's not at the top level (it has
+// dots). But it is an eTLD (effective TLD), because that's the branching point
+// for domain name registrars.
+//
+// Another name for "an eTLD" is "a public suffix". Often, what's more of
+// interest is the eTLD+1, or one more label than the public suffix. For
+// example, browsers partition read/write access to HTTP cookies according to
+// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from
+// "google.com.au", but web pages served from "maps.google.com" can share
+// cookies from "www.google.com", so you don't have to sign into Google Maps
+// separately from signing into Google Web Search. Note that all four of those
+// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1,
+// the last two are not (but share the same eTLD+1: "google.com").
+//
+// All of these domains have the same eTLD+1:
+// - "www.books.amazon.co.uk"
+// - "books.amazon.co.uk"
+// - "amazon.co.uk"
+//
+// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk".
+//
+// There is no closed form algorithm to calculate the eTLD of a domain.
+// Instead, the calculation is data driven. This package provides a
+// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at
+// https://publicsuffix.org/
+package publicsuffix // import "golang.org/x/net/publicsuffix"
+
+// TODO: specify case sensitivity and leading/trailing dot behavior for
+// func PublicSuffix and func EffectiveTLDPlusOne.
+
+import (
+ "fmt"
+ "net/http/cookiejar"
+ "strings"
+)
+
+// List implements the cookiejar.PublicSuffixList interface by calling the
+// PublicSuffix function.
+var List cookiejar.PublicSuffixList = list{}
+
+type list struct{}
+
+func (list) PublicSuffix(domain string) string {
+ ps, _ := PublicSuffix(domain)
+ return ps
+}
+
+func (list) String() string {
+ return version
+}
+
+// PublicSuffix returns the public suffix of the domain using a copy of the
+// publicsuffix.org database compiled into the library.
+//
+// icann is whether the public suffix is managed by the Internet Corporation
+// for Assigned Names and Numbers. If not, the public suffix is either a
+// privately managed domain (and in practice, not a top level domain) or an
+// unmanaged top level domain (and not explicitly mentioned in the
+// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
+// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
+// "cromulent" is an unmanaged top level domain.
+//
+// Use cases for distinguishing ICANN domains like "foo.com" from private
+// domains like "foo.appspot.com" can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+ lo, hi := uint32(0), uint32(numTLD)
+ s, suffix, icannNode, wildcard := domain, len(domain), false, false
+loop:
+ for {
+ dot := strings.LastIndex(s, ".")
+ if wildcard {
+ icann = icannNode
+ suffix = 1 + dot
+ }
+ if lo == hi {
+ break
+ }
+ f := find(s[1+dot:], lo, hi)
+ if f == notFound {
+ break
+ }
+
+ u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
+ icannNode = u&(1<>= nodesBitsICANN
+ u = children.get(u & (1<>= childrenBitsLo
+ hi = u & (1<>= childrenBitsHi
+ switch u & (1<>= childrenBitsNodeType
+ wildcard = u&(1<>= nodesBitsTextLength
+ offset := x & (1<
Date: Wed, 15 Feb 2023 12:01:43 +0000
Subject: [PATCH 2/5] make deps
---
src/core/metrics/sources/nginx_access_log.go | 10 +++++-----
test/docker/Dockerfile | 3 +++
.../v2/src/core/metrics/sources/nginx_access_log.go | 10 +++++-----
3 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/src/core/metrics/sources/nginx_access_log.go b/src/core/metrics/sources/nginx_access_log.go
index 8f8150867..2061e9a6a 100644
--- a/src/core/metrics/sources/nginx_access_log.go
+++ b/src/core/metrics/sources/nginx_access_log.go
@@ -28,7 +28,7 @@ import (
const (
spaceDelim = " "
- pattern = `[A-Z]+\s.+\s[A-Z]+/.+`
+ pattern = `[A-Z]+\s.+\s[A-Z]+/.+`
)
// This metrics source is used to tail the NGINX access logs to retrieve metrics.
@@ -371,15 +371,15 @@ func (c *NginxAccessLog) logStats(ctx context.Context, logFile, logFormat string
}
func getParsedRequest(request string) (method string, uri string, protocol string) {
-
- // Looking for capital letters, a space, anything, a space, capital letters, forward slash then anything.
+
+ // Looking for capital letters, a space, anything, a space, capital letters, forward slash then anything.
// Example: DELETE nginx_status HTTP/1.1
regex, err := regexp.Compile(pattern)
- if err != nil{
+ if err != nil {
return
}
-
+
if regex.FindString(request) == "" {
return
}
diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile
index b5500c48a..4ffb1f920 100644
--- a/test/docker/Dockerfile
+++ b/test/docker/Dockerfile
@@ -12,6 +12,9 @@ LABEL maintainer="NGINX Docker Maintainers "
# Download certificate and key from the customer portal (https://account.f5.com)
# and copy to the build context
+# https://askubuntu.com/questions/909277/avoiding-user-interaction-with-tzdata-when-installing-certbot-in-a-docker-contai
+ARG DEBIAN_FRONTEND=noninteractive
+
RUN --mount=type=secret,id=nginx-crt,dst=nginx-repo.crt \
--mount=type=secret,id=nginx-key,dst=nginx-repo.key \
set -x \
diff --git a/test/performance/vendor/github.com/nginx/agent/v2/src/core/metrics/sources/nginx_access_log.go b/test/performance/vendor/github.com/nginx/agent/v2/src/core/metrics/sources/nginx_access_log.go
index 8f8150867..2061e9a6a 100644
--- a/test/performance/vendor/github.com/nginx/agent/v2/src/core/metrics/sources/nginx_access_log.go
+++ b/test/performance/vendor/github.com/nginx/agent/v2/src/core/metrics/sources/nginx_access_log.go
@@ -28,7 +28,7 @@ import (
const (
spaceDelim = " "
- pattern = `[A-Z]+\s.+\s[A-Z]+/.+`
+ pattern = `[A-Z]+\s.+\s[A-Z]+/.+`
)
// This metrics source is used to tail the NGINX access logs to retrieve metrics.
@@ -371,15 +371,15 @@ func (c *NginxAccessLog) logStats(ctx context.Context, logFile, logFormat string
}
func getParsedRequest(request string) (method string, uri string, protocol string) {
-
- // Looking for capital letters, a space, anything, a space, capital letters, forward slash then anything.
+
+ // Looking for capital letters, a space, anything, a space, capital letters, forward slash then anything.
// Example: DELETE nginx_status HTTP/1.1
regex, err := regexp.Compile(pattern)
- if err != nil{
+ if err != nil {
return
}
-
+
if regex.FindString(request) == "" {
return
}
From 601f18fb7a13c56f5a6db1cf8f6f9be96dcad8ad Mon Sep 17 00:00:00 2001
From: Aphral Griffin
Date: Wed, 15 Feb 2023 15:08:22 +0000
Subject: [PATCH 3/5] change component test
---
test/component/agent_api_test.go | 25 ++++++++++---------------
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/test/component/agent_api_test.go b/test/component/agent_api_test.go
index f079709b7..0a92dcf2c 100644
--- a/test/component/agent_api_test.go
+++ b/test/component/agent_api_test.go
@@ -9,6 +9,7 @@ import (
"testing"
"time"
+ "encoding/json"
"github.com/go-resty/resty/v2"
"github.com/nginx/agent/sdk/v2/proto"
@@ -74,23 +75,17 @@ func TestGetNginxInstances(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, response.StatusCode())
+ var nginxDetailsResponse []*proto.NginxDetails
+ responseData := response.Body()
+ err = json.Unmarshal(responseData, &nginxDetailsResponse)
+ assert.Nil(t, err)
+ assert.True(t, json.Valid(responseData))
+
if tt.nginxDetails == nil {
- assert.Equal(t, "[]", response.String())
+ assert.Equal(t, 0, len(nginxDetailsResponse))
} else {
- nginxDetails := tutils.ProcessApiNginxInstanceResponse(response)
- for _, detail := range nginxDetails {
- detail := strings.Split(detail, ":")
- switch {
- case strings.Contains(detail[0], "nginx_id"):
- assert.Equal(t, "45d4sf5d4sf4e8s4f8es4564", detail[1])
- case strings.Contains(detail[0], "version"):
- assert.Equal(t, "21", detail[1])
- case strings.Contains(detail[0], "conf_path"):
- assert.Equal(t, "/etc/nginx/conf", detail[1])
- case strings.Contains(detail[0], "start_time"):
- assert.Equal(t, "1238043824", detail[1])
- }
- }
+ assert.Equal(t, 1, len(nginxDetailsResponse))
+ assert.Equal(t, tt.nginxDetails, nginxDetailsResponse[0])
}
agentAPI.Close()
From a50ba0f615c8b0227297f1797f47a8908901a9f8 Mon Sep 17 00:00:00 2001
From: Aphral Griffin
Date: Wed, 15 Feb 2023 15:58:56 +0000
Subject: [PATCH 4/5] fix api integration test
---
test/component/agent_api_test.go | 2 +-
test/integration/api/api_test.go | 30 +++++++++----------
test/integration/go.mod | 2 +-
.../v2/test/utils/api_process_response.go | 12 +-------
test/utils/api_process_response.go | 12 +-------
5 files changed, 18 insertions(+), 40 deletions(-)
diff --git a/test/component/agent_api_test.go b/test/component/agent_api_test.go
index 0a92dcf2c..fab531251 100644
--- a/test/component/agent_api_test.go
+++ b/test/component/agent_api_test.go
@@ -177,7 +177,7 @@ func TestMetrics(t *testing.T) {
assert.Contains(t, response.String(), "# TYPE system_cpu_idle gauge")
agentAPI.Close()
- responseData := tutils.ProcessApiMetricResponse(response)
+ responseData := tutils.ProcessResponse(response)
for _, m := range responseData {
metric := strings.Split(m, " ")
diff --git a/test/integration/api/api_test.go b/test/integration/api/api_test.go
index 8d3d4641a..96b71cdaf 100644
--- a/test/integration/api/api_test.go
+++ b/test/integration/api/api_test.go
@@ -2,6 +2,7 @@ package api
import (
"context"
+ "encoding/json"
"fmt"
"net/http"
"os"
@@ -11,8 +12,8 @@ import (
"time"
"github.com/go-resty/resty/v2"
+ "github.com/nginx/agent/sdk/v2/proto"
tutils "github.com/nginx/agent/v2/test/utils"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/modules/compose"
@@ -57,21 +58,18 @@ func TestAPI_Nginx(t *testing.T) {
assert.Contains(t, resp.String(), "nginx_id")
assert.NotContains(t, resp.String(), "test_fail_nginx")
- nginxDetails := tutils.ProcessApiNginxInstanceResponse(resp)
+ var nginxDetailsResponse []*proto.NginxDetails
- for _, detail := range nginxDetails {
- detail := strings.Split(detail, ":")
- switch {
- case strings.Contains(detail[0], "nginx_id"):
- assert.NotNil(t, detail[1])
- case strings.Contains(detail[0], "version"):
- assert.NotNil(t, detail[1])
- case strings.Contains(detail[0], "runtime_modules"):
- assert.Contains(t, detail[1], "http_ssl_module")
- case strings.Contains(detail[0], "conf_path"):
- assert.Equal(t, "/etc/nginx/nginx.conf", detail[1])
- }
- }
+ responseData := resp.Body()
+ err = json.Unmarshal(responseData, &nginxDetailsResponse)
+
+ assert.Nil(t, err)
+ assert.True(t, json.Valid(responseData))
+
+ assert.NotNil(t, nginxDetailsResponse[0].NginxId)
+ assert.NotNil(t, nginxDetailsResponse[0].Version)
+ assert.Contains(t, nginxDetailsResponse[0].RuntimeModules, "http_stub_status_module")
+ assert.Equal(t, "/etc/nginx/nginx.conf", nginxDetailsResponse[0].ConfPath)
}
@@ -93,7 +91,7 @@ func TestAPI_Metrics(t *testing.T) {
assert.Contains(t, resp.String(), "system_cpu_system")
assert.NotContains(t, resp.String(), "test_fail_metric")
- metrics := tutils.ProcessApiMetricResponse(resp)
+ metrics := tutils.ProcessResponse(resp)
for _, m := range metrics {
metric := strings.Split(m, " ")
diff --git a/test/integration/go.mod b/test/integration/go.mod
index cc2f9a7b7..553e43da0 100644
--- a/test/integration/go.mod
+++ b/test/integration/go.mod
@@ -4,6 +4,7 @@ go 1.19
require (
github.com/go-resty/resty/v2 v2.7.0
+ github.com/nginx/agent/sdk/v2 v2.0.0-00010101000000-000000000000
github.com/nginx/agent/v2 v2.22.0
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/sirupsen/logrus v1.9.0
@@ -90,7 +91,6 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
- github.com/nginx/agent/sdk/v2 v2.0.0-00010101000000-000000000000 // indirect
github.com/nginxinc/nginx-go-crossplane v0.4.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
diff --git a/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go b/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
index b89bafc92..5d2e7b434 100644
--- a/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
+++ b/test/performance/vendor/github.com/nginx/agent/v2/test/utils/api_process_response.go
@@ -5,7 +5,7 @@ import (
"strings"
)
-func ProcessApiMetricResponse(resp *resty.Response) []string {
+func ProcessResponse(resp *resty.Response) []string {
metrics := strings.Split(resp.String(), "\n")
i := 0
@@ -21,13 +21,3 @@ func ProcessApiMetricResponse(resp *resty.Response) []string {
return metrics
}
-
-func ProcessApiNginxInstanceResponse(resp *resty.Response) []string {
- details := strings.ReplaceAll(resp.String(), "\"", "")
- details = strings.ReplaceAll(details, "\\", "")
-
- detail := strings.Split(details, ",")
-
- return detail
-
-}
diff --git a/test/utils/api_process_response.go b/test/utils/api_process_response.go
index b89bafc92..5d2e7b434 100644
--- a/test/utils/api_process_response.go
+++ b/test/utils/api_process_response.go
@@ -5,7 +5,7 @@ import (
"strings"
)
-func ProcessApiMetricResponse(resp *resty.Response) []string {
+func ProcessResponse(resp *resty.Response) []string {
metrics := strings.Split(resp.String(), "\n")
i := 0
@@ -21,13 +21,3 @@ func ProcessApiMetricResponse(resp *resty.Response) []string {
return metrics
}
-
-func ProcessApiNginxInstanceResponse(resp *resty.Response) []string {
- details := strings.ReplaceAll(resp.String(), "\"", "")
- details = strings.ReplaceAll(details, "\\", "")
-
- detail := strings.Split(details, ",")
-
- return detail
-
-}
From b549aec0815f82911c64771a657e82157669826a Mon Sep 17 00:00:00 2001
From: Aphral Griffin
Date: Wed, 15 Feb 2023 16:17:20 +0000
Subject: [PATCH 5/5] make deps
---
test/integration/go.mod | 10 +-
test/integration/go.sum | 43 +-
.../github.com/Azure/go-ansiterm/SECURITY.md | 41 +
.../github.com/pelletier/go-toml/v2/README.md | 11 +
.../github.com/pelletier/go-toml/v2/decode.go | 88 +-
.../github.com/pelletier/go-toml/v2/errors.go | 32 +-
.../go-toml/v2/internal/ast/builder.go | 51 -
.../go-toml/v2/internal/characters/ascii.go | 42 +
.../v2/{ => internal/characters}/utf8.go | 53 +-
.../go-toml/v2/internal/tracker/key.go | 12 +-
.../go-toml/v2/internal/tracker/seen.go | 28 +-
.../pelletier/go-toml/v2/localtime.go | 6 +-
.../pelletier/go-toml/v2/marshaler.go | 4 +-
.../github.com/pelletier/go-toml/v2/strict.go | 34 +-
.../github.com/pelletier/go-toml/v2/types.go | 10 +-
.../pelletier/go-toml/v2/unmarshaler.go | 122 +-
.../v2/{internal/ast => unstable}/ast.go | 68 +-
.../pelletier/go-toml/v2/unstable/builder.go | 71 +
.../pelletier/go-toml/v2/unstable/doc.go | 3 +
.../v2/{internal/ast => unstable}/kind.go | 12 +-
.../go-toml/v2/{ => unstable}/parser.go | 293 ++-
.../go-toml/v2/{ => unstable}/scanner.go | 50 +-
.../vendor/github.com/spf13/afero/memmap.go | 5 +
.../vendor/github.com/spf13/viper/Makefile | 2 +-
.../vendor/github.com/spf13/viper/README.md | 6 +-
.../viper/internal/encoding/toml/codec.go | 29 +-
.../viper/internal/encoding/toml/codec2.go | 19 -
.../viper/internal/encoding/yaml/codec.go | 2 +-
.../viper/internal/encoding/yaml/yaml2.go | 14 -
.../viper/internal/encoding/yaml/yaml3.go | 14 -
.../vendor/github.com/spf13/viper/viper.go | 5 +
.../github.com/subosito/gotenv/gotenv.go | 44 +-
.../go.opentelemetry.io/otel/.lycheeignore | 1 +
.../go.opentelemetry.io/otel/CHANGELOG.md | 138 +-
.../vendor/go.opentelemetry.io/otel/Makefile | 10 +-
.../go.opentelemetry.io/otel/RELEASING.md | 11 +-
.../go.opentelemetry.io/otel/handler.go | 25 +-
.../otel/internal/global/internal_logging.go | 30 +-
.../otel/sdk/resource/builtin.go | 2 +-
.../otel/sdk/resource/container.go | 2 +-
.../otel/sdk/resource/env.go | 2 +-
.../otel/sdk/resource/os.go | 2 +-
.../otel/sdk/resource/process.go | 2 +-
.../otel/sdk/trace/provider.go | 15 +-
.../otel/sdk/trace/sampling.go | 10 +-
.../otel/sdk/trace/span.go | 2 +-
.../otel/semconv/v1.17.0/doc.go | 20 +
.../otel/semconv/v1.17.0/exception.go | 20 +
.../otel/semconv/v1.17.0/http.go | 21 +
.../otel/semconv/v1.17.0/resource.go | 1118 ++++++++++
.../otel/semconv/v1.17.0/schema.go | 20 +
.../otel/semconv/v1.17.0/trace.go | 1892 +++++++++++++++++
.../go.opentelemetry.io/otel/version.go | 2 +-
.../go.opentelemetry.io/otel/versions.yaml | 4 +-
test/integration/vendor/modules.txt | 26 +-
55 files changed, 3949 insertions(+), 650 deletions(-)
create mode 100644 test/integration/vendor/github.com/Azure/go-ansiterm/SECURITY.md
delete mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
rename test/integration/vendor/github.com/pelletier/go-toml/v2/{ => internal/characters}/utf8.go (87%)
rename test/integration/vendor/github.com/pelletier/go-toml/v2/{internal/ast => unstable}/ast.go (60%)
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
create mode 100644 test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
rename test/integration/vendor/github.com/pelletier/go-toml/v2/{internal/ast => unstable}/kind.go (81%)
rename test/integration/vendor/github.com/pelletier/go-toml/v2/{ => unstable}/parser.go (70%)
rename test/integration/vendor/github.com/pelletier/go-toml/v2/{ => unstable}/scanner.go (79%)
delete mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
delete mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
delete mode 100644 test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
create mode 100644 test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
diff --git a/test/integration/go.mod b/test/integration/go.mod
index 49f4790f2..da081fe89 100644
--- a/test/integration/go.mod
+++ b/test/integration/go.mod
@@ -15,7 +15,6 @@ require (
)
require (
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/AlecAivazis/survey/v2 v2.3.6 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
@@ -71,7 +70,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.15 // indirect
- github.com/kr/pretty v0.3.1 // indirect
+ github.com/klauspost/cpuid/v2 v2.1.0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
@@ -99,7 +99,7 @@ require (
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/orcaman/concurrent-map v1.0.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
- github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
@@ -111,12 +111,14 @@ require (
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shirou/gopsutil/v3 v3.22.7 // indirect
- github.com/spf13/afero v1.9.2 // indirect
+ github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/cobra v1.6.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.15.0 // indirect
+ github.com/stretchr/objx v0.5.0 // indirect
+ github.com/subosito/gotenv v1.4.2 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
diff --git a/test/integration/go.sum b/test/integration/go.sum
index 1b2d7124a..8a349e813 100644
--- a/test/integration/go.sum
+++ b/test/integration/go.sum
@@ -24,14 +24,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-<<<<<<< HEAD
-cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
-cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
-=======
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
->>>>>>> main
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -48,6 +42,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw=
github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
@@ -226,6 +221,7 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
+github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -374,15 +370,10 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-<<<<<<< HEAD
-github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=
-github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
-github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
-=======
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
->>>>>>> main
+github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
+github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -501,12 +492,8 @@ github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HD
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-<<<<<<< HEAD
-github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
-github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
-=======
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
->>>>>>> main
+github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -576,12 +563,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-<<<<<<< HEAD
-github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
-github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
-=======
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
->>>>>>> main
+github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
@@ -615,12 +598,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-<<<<<<< HEAD
-github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
-github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-=======
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
->>>>>>> main
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/testcontainers/testcontainers-go v0.17.0 h1:UdKSw2DJXinlS6ijbFb4VHpQzD+EfTwcTq1/19a+8PU=
github.com/testcontainers/testcontainers-go v0.17.0/go.mod h1:n5trpHrB68IUelEqGNC8VipaCo6jOGusU44kIK11XRs=
@@ -678,6 +657,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:
go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ=
+go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=
@@ -690,9 +670,11 @@ go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW0
go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o=
+go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE=
go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc=
+go.opentelemetry.io/otel/trace v1.12.0/go.mod h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
@@ -1047,19 +1029,14 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-<<<<<<< HEAD
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
-=======
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
->>>>>>> main
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
diff --git a/test/integration/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/test/integration/vendor/github.com/Azure/go-ansiterm/SECURITY.md
new file mode 100644
index 000000000..e138ec5d6
--- /dev/null
+++ b/test/integration/vendor/github.com/Azure/go-ansiterm/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md b/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
index a63c3a796..9f8439cc7 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/README.md
@@ -140,6 +140,17 @@ fmt.Println(string(b))
[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
+## Unstable API
+
+This API does not yet follow the backward compatibility guarantees of this
+library. They provide early access to features that may have rough edges or an
+API subject to change.
+
+### Parser
+
+Parser is the unstable API that allows iterative parsing of a TOML document at
+the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
+
## Benchmarks
Execution time speedup compared to other Go TOML libraries:
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
index 4af965360..3a860d0f6 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/decode.go
@@ -5,6 +5,8 @@ import (
"math"
"strconv"
"time"
+
+ "github.com/pelletier/go-toml/v2/unstable"
)
func parseInteger(b []byte) (int64, error) {
@@ -32,7 +34,7 @@ func parseLocalDate(b []byte) (LocalDate, error) {
var date LocalDate
if len(b) != 10 || b[4] != '-' || b[7] != '-' {
- return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD")
+ return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
}
var err error
@@ -53,7 +55,7 @@ func parseLocalDate(b []byte) (LocalDate, error) {
}
if !isValidDate(date.Year, date.Month, date.Day) {
- return LocalDate{}, newDecodeError(b, "impossible date")
+ return LocalDate{}, unstable.NewParserError(b, "impossible date")
}
return date, nil
@@ -64,7 +66,7 @@ func parseDecimalDigits(b []byte) (int, error) {
for i, c := range b {
if c < '0' || c > '9' {
- return 0, newDecodeError(b[i:i+1], "expected digit (0-9)")
+ return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
}
v *= 10
v += int(c - '0')
@@ -97,7 +99,7 @@ func parseDateTime(b []byte) (time.Time, error) {
} else {
const dateTimeByteLen = 6
if len(b) != dateTimeByteLen {
- return time.Time{}, newDecodeError(b, "invalid date-time timezone")
+ return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
}
var direction int
switch b[0] {
@@ -106,11 +108,11 @@ func parseDateTime(b []byte) (time.Time, error) {
case '+':
direction = +1
default:
- return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character")
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
}
if b[3] != ':' {
- return time.Time{}, newDecodeError(b[3:4], "expected a : separator")
+ return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
}
hours, err := parseDecimalDigits(b[1:3])
@@ -118,7 +120,7 @@ func parseDateTime(b []byte) (time.Time, error) {
return time.Time{}, err
}
if hours > 23 {
- return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours")
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
}
minutes, err := parseDecimalDigits(b[4:6])
@@ -126,7 +128,7 @@ func parseDateTime(b []byte) (time.Time, error) {
return time.Time{}, err
}
if minutes > 59 {
- return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes")
+ return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
}
seconds := direction * (hours*3600 + minutes*60)
@@ -139,7 +141,7 @@ func parseDateTime(b []byte) (time.Time, error) {
}
if len(b) > 0 {
- return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone")
+ return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
}
t := time.Date(
@@ -160,7 +162,7 @@ func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
const localDateTimeByteMinLen = 11
if len(b) < localDateTimeByteMinLen {
- return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
+ return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
}
date, err := parseLocalDate(b[:10])
@@ -171,7 +173,7 @@ func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
sep := b[10]
if sep != 'T' && sep != ' ' && sep != 't' {
- return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space")
+ return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
}
t, rest, err := parseLocalTime(b[11:])
@@ -195,7 +197,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
// check if b matches to have expected format HH:MM:SS[.NNNNNN]
const localTimeByteLen = 8
if len(b) < localTimeByteLen {
- return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
+ return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
}
var err error
@@ -206,10 +208,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
}
if t.Hour > 23 {
- return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23")
+ return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
}
if b[2] != ':' {
- return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes")
+ return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
}
t.Minute, err = parseDecimalDigits(b[3:5])
@@ -217,10 +219,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
return t, nil, err
}
if t.Minute > 59 {
- return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59")
+ return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
}
if b[5] != ':' {
- return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds")
+ return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
}
t.Second, err = parseDecimalDigits(b[6:8])
@@ -229,7 +231,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
}
if t.Second > 60 {
- return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60")
+ return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60")
}
b = b[8:]
@@ -242,7 +244,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
for i, c := range b[1:] {
if !isDigit(c) {
if i == 0 {
- return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point")
+ return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
}
break
}
@@ -266,7 +268,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) {
}
if precision == 0 {
- return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit")
+ return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
}
t.Nanosecond = frac * nspow[precision]
@@ -289,24 +291,24 @@ func parseFloat(b []byte) (float64, error) {
}
if cleaned[0] == '.' {
- return 0, newDecodeError(b, "float cannot start with a dot")
+ return 0, unstable.NewParserError(b, "float cannot start with a dot")
}
if cleaned[len(cleaned)-1] == '.' {
- return 0, newDecodeError(b, "float cannot end with a dot")
+ return 0, unstable.NewParserError(b, "float cannot end with a dot")
}
dotAlreadySeen := false
for i, c := range cleaned {
if c == '.' {
if dotAlreadySeen {
- return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point")
+ return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
}
if !isDigit(cleaned[i-1]) {
- return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit")
+ return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
}
if !isDigit(cleaned[i+1]) {
- return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit")
+ return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
}
dotAlreadySeen = true
}
@@ -317,12 +319,12 @@ func parseFloat(b []byte) (float64, error) {
start = 1
}
if cleaned[start] == '0' && isDigit(cleaned[start+1]) {
- return 0, newDecodeError(b, "float integer part cannot have leading zeroes")
+ return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
}
f, err := strconv.ParseFloat(string(cleaned), 64)
if err != nil {
- return 0, newDecodeError(b, "unable to parse float: %w", err)
+ return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
}
return f, nil
@@ -336,7 +338,7 @@ func parseIntHex(b []byte) (int64, error) {
i, err := strconv.ParseInt(string(cleaned), 16, 64)
if err != nil {
- return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err)
+ return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
}
return i, nil
@@ -350,7 +352,7 @@ func parseIntOct(b []byte) (int64, error) {
i, err := strconv.ParseInt(string(cleaned), 8, 64)
if err != nil {
- return 0, newDecodeError(b, "couldn't parse octal number: %w", err)
+ return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
}
return i, nil
@@ -364,7 +366,7 @@ func parseIntBin(b []byte) (int64, error) {
i, err := strconv.ParseInt(string(cleaned), 2, 64)
if err != nil {
- return 0, newDecodeError(b, "couldn't parse binary number: %w", err)
+ return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
}
return i, nil
@@ -387,12 +389,12 @@ func parseIntDec(b []byte) (int64, error) {
}
if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
- return 0, newDecodeError(b, "leading zero not allowed on decimal number")
+ return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
}
i, err := strconv.ParseInt(string(cleaned), 10, 64)
if err != nil {
- return 0, newDecodeError(b, "couldn't parse decimal number: %w", err)
+ return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
}
return i, nil
@@ -409,11 +411,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
}
if b[start] == '_' {
- return nil, newDecodeError(b[start:start+1], "number cannot start with underscore")
+ return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
}
if b[len(b)-1] == '_' {
- return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
+ return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
}
// fast path
@@ -435,7 +437,7 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
c := b[i]
if c == '_' {
if !before {
- return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
+ return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
}
before = false
} else {
@@ -449,11 +451,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
if b[0] == '_' {
- return nil, newDecodeError(b[0:1], "number cannot start with underscore")
+ return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
}
if b[len(b)-1] == '_' {
- return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
+ return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
}
// fast path
@@ -476,10 +478,10 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
switch c {
case '_':
if !before {
- return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
+ return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
}
if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
- return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent")
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
}
before = false
case '+', '-':
@@ -488,15 +490,15 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
before = false
case 'e', 'E':
if i < len(b)-1 && b[i+1] == '_' {
- return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent")
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
}
cleaned = append(cleaned, c)
case '.':
if i < len(b)-1 && b[i+1] == '_' {
- return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point")
+ return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
}
if i > 0 && b[i-1] == '_' {
- return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point")
+ return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
}
cleaned = append(cleaned, c)
default:
@@ -542,3 +544,7 @@ func daysIn(m int, year int) int {
func isLeap(year int) bool {
return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
+
+func isDigit(r byte) bool {
+ return r >= '0' && r <= '9'
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
index 2e7f0ffdf..309733f1f 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/errors.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/pelletier/go-toml/v2/internal/danger"
+ "github.com/pelletier/go-toml/v2/unstable"
)
// DecodeError represents an error encountered during the parsing or decoding
@@ -55,25 +56,6 @@ func (s *StrictMissingError) String() string {
type Key []string
-// internal version of DecodeError that is used as the base to create a
-// DecodeError with full context.
-type decodeError struct {
- highlight []byte
- message string
- key Key // optional
-}
-
-func (de *decodeError) Error() string {
- return de.message
-}
-
-func newDecodeError(highlight []byte, format string, args ...interface{}) error {
- return &decodeError{
- highlight: highlight,
- message: fmt.Errorf(format, args...).Error(),
- }
-}
-
// Error returns the error message contained in the DecodeError.
func (e *DecodeError) Error() string {
return "toml: " + e.message
@@ -105,12 +87,12 @@ func (e *DecodeError) Key() Key {
// highlight can be freely deallocated.
//
//nolint:funlen
-func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
- offset := danger.SubsliceOffset(document, de.highlight)
+func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
+ offset := danger.SubsliceOffset(document, de.Highlight)
errMessage := de.Error()
errLine, errColumn := positionAtEnd(document[:offset])
- before, after := linesOfContext(document, de.highlight, offset, 3)
+ before, after := linesOfContext(document, de.Highlight, offset, 3)
var buf strings.Builder
@@ -140,7 +122,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
buf.Write(before[0])
}
- buf.Write(de.highlight)
+ buf.Write(de.Highlight)
if len(after) > 0 {
buf.Write(after[0])
@@ -158,7 +140,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
buf.WriteString(strings.Repeat(" ", len(before[0])))
}
- buf.WriteString(strings.Repeat("~", len(de.highlight)))
+ buf.WriteString(strings.Repeat("~", len(de.Highlight)))
if len(errMessage) > 0 {
buf.WriteString(" ")
@@ -183,7 +165,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
message: errMessage,
line: errLine,
column: errColumn,
- key: de.key,
+ key: de.Key,
human: buf.String(),
}
}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
deleted file mode 100644
index 120f16e5c..000000000
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ast
-
-type Reference int
-
-const InvalidReference Reference = -1
-
-func (r Reference) Valid() bool {
- return r != InvalidReference
-}
-
-type Builder struct {
- tree Root
- lastIdx int
-}
-
-func (b *Builder) Tree() *Root {
- return &b.tree
-}
-
-func (b *Builder) NodeAt(ref Reference) *Node {
- return b.tree.at(ref)
-}
-
-func (b *Builder) Reset() {
- b.tree.nodes = b.tree.nodes[:0]
- b.lastIdx = 0
-}
-
-func (b *Builder) Push(n Node) Reference {
- b.lastIdx = len(b.tree.nodes)
- b.tree.nodes = append(b.tree.nodes, n)
- return Reference(b.lastIdx)
-}
-
-func (b *Builder) PushAndChain(n Node) Reference {
- newIdx := len(b.tree.nodes)
- b.tree.nodes = append(b.tree.nodes, n)
- if b.lastIdx >= 0 {
- b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
- }
- b.lastIdx = newIdx
- return Reference(b.lastIdx)
-}
-
-func (b *Builder) AttachChild(parent Reference, child Reference) {
- b.tree.nodes[parent].child = int(child) - int(parent)
-}
-
-func (b *Builder) Chain(from Reference, to Reference) {
- b.tree.nodes[from].next = int(to) - int(from)
-}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
new file mode 100644
index 000000000..80f698db4
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
@@ -0,0 +1,42 @@
+package characters
+
+var invalidAsciiTable = [256]bool{
+ 0x00: true,
+ 0x01: true,
+ 0x02: true,
+ 0x03: true,
+ 0x04: true,
+ 0x05: true,
+ 0x06: true,
+ 0x07: true,
+ 0x08: true,
+ // 0x09 TAB
+ // 0x0A LF
+ 0x0B: true,
+ 0x0C: true,
+ // 0x0D CR
+ 0x0E: true,
+ 0x0F: true,
+ 0x10: true,
+ 0x11: true,
+ 0x12: true,
+ 0x13: true,
+ 0x14: true,
+ 0x15: true,
+ 0x16: true,
+ 0x17: true,
+ 0x18: true,
+ 0x19: true,
+ 0x1A: true,
+ 0x1B: true,
+ 0x1C: true,
+ 0x1D: true,
+ 0x1E: true,
+ 0x1F: true,
+ // 0x20 - 0x7E Printable ASCII characters
+ 0x7F: true,
+}
+
+func InvalidAscii(b byte) bool {
+ return invalidAsciiTable[b]
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
similarity index 87%
rename from test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go
rename to test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
index d47a4f20c..db4f45acb 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/utf8.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
@@ -1,4 +1,4 @@
-package toml
+package characters
import (
"unicode/utf8"
@@ -32,7 +32,7 @@ func (u utf8Err) Zero() bool {
// 0x9 => tab, ok
// 0xA - 0x1F => invalid
// 0x7F => invalid
-func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
+func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
// Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
offset := 0
for len(p) >= 8 {
@@ -48,7 +48,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
}
for i, b := range p[:8] {
- if invalidAscii(b) {
+ if InvalidAscii(b) {
err.Index = offset + i
err.Size = 1
return
@@ -62,7 +62,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
for i := 0; i < n; {
pi := p[i]
if pi < utf8.RuneSelf {
- if invalidAscii(pi) {
+ if InvalidAscii(pi) {
err.Index = offset + i
err.Size = 1
return
@@ -106,11 +106,11 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
}
// Return the size of the next rune if valid, 0 otherwise.
-func utf8ValidNext(p []byte) int {
+func Utf8ValidNext(p []byte) int {
c := p[0]
if c < utf8.RuneSelf {
- if invalidAscii(c) {
+ if InvalidAscii(c) {
return 0
}
return 1
@@ -140,47 +140,6 @@ func utf8ValidNext(p []byte) int {
return size
}
-var invalidAsciiTable = [256]bool{
- 0x00: true,
- 0x01: true,
- 0x02: true,
- 0x03: true,
- 0x04: true,
- 0x05: true,
- 0x06: true,
- 0x07: true,
- 0x08: true,
- // 0x09 TAB
- // 0x0A LF
- 0x0B: true,
- 0x0C: true,
- // 0x0D CR
- 0x0E: true,
- 0x0F: true,
- 0x10: true,
- 0x11: true,
- 0x12: true,
- 0x13: true,
- 0x14: true,
- 0x15: true,
- 0x16: true,
- 0x17: true,
- 0x18: true,
- 0x19: true,
- 0x1A: true,
- 0x1B: true,
- 0x1C: true,
- 0x1D: true,
- 0x1E: true,
- 0x1F: true,
- // 0x20 - 0x7E Printable ASCII characters
- 0x7F: true,
-}
-
-func invalidAscii(b byte) bool {
- return invalidAsciiTable[b]
-}
-
// acceptRange gives the range of valid values for the second byte in a UTF-8
// sequence.
type acceptRange struct {
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
index 7c148f48d..149b17f53 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
@@ -1,8 +1,6 @@
package tracker
-import (
- "github.com/pelletier/go-toml/v2/internal/ast"
-)
+import "github.com/pelletier/go-toml/v2/unstable"
// KeyTracker is a tracker that keeps track of the current Key as the AST is
// walked.
@@ -11,19 +9,19 @@ type KeyTracker struct {
}
// UpdateTable sets the state of the tracker with the AST table node.
-func (t *KeyTracker) UpdateTable(node *ast.Node) {
+func (t *KeyTracker) UpdateTable(node *unstable.Node) {
t.reset()
t.Push(node)
}
// UpdateArrayTable sets the state of the tracker with the AST array table node.
-func (t *KeyTracker) UpdateArrayTable(node *ast.Node) {
+func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) {
t.reset()
t.Push(node)
}
// Push the given key on the stack.
-func (t *KeyTracker) Push(node *ast.Node) {
+func (t *KeyTracker) Push(node *unstable.Node) {
it := node.Key()
for it.Next() {
t.k = append(t.k, string(it.Node().Data))
@@ -31,7 +29,7 @@ func (t *KeyTracker) Push(node *ast.Node) {
}
// Pop key from stack.
-func (t *KeyTracker) Pop(node *ast.Node) {
+func (t *KeyTracker) Pop(node *unstable.Node) {
it := node.Key()
for it.Next() {
t.k = t.k[:len(t.k)-1]
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
index a7ee05ba6..40e23f830 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
@@ -5,7 +5,7 @@ import (
"fmt"
"sync"
- "github.com/pelletier/go-toml/v2/internal/ast"
+ "github.com/pelletier/go-toml/v2/unstable"
)
type keyKind uint8
@@ -150,23 +150,23 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) {
// CheckExpression takes a top-level node and checks that it does not contain
// keys that have been seen in previous calls, and validates that types are
// consistent.
-func (s *SeenTracker) CheckExpression(node *ast.Node) error {
+func (s *SeenTracker) CheckExpression(node *unstable.Node) error {
if s.entries == nil {
s.reset()
}
switch node.Kind {
- case ast.KeyValue:
+ case unstable.KeyValue:
return s.checkKeyValue(node)
- case ast.Table:
+ case unstable.Table:
return s.checkTable(node)
- case ast.ArrayTable:
+ case unstable.ArrayTable:
return s.checkArrayTable(node)
default:
panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind))
}
}
-func (s *SeenTracker) checkTable(node *ast.Node) error {
+func (s *SeenTracker) checkTable(node *unstable.Node) error {
if s.currentIdx >= 0 {
s.setExplicitFlag(s.currentIdx)
}
@@ -219,7 +219,7 @@ func (s *SeenTracker) checkTable(node *ast.Node) error {
return nil
}
-func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
+func (s *SeenTracker) checkArrayTable(node *unstable.Node) error {
if s.currentIdx >= 0 {
s.setExplicitFlag(s.currentIdx)
}
@@ -267,7 +267,7 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error {
return nil
}
-func (s *SeenTracker) checkKeyValue(node *ast.Node) error {
+func (s *SeenTracker) checkKeyValue(node *unstable.Node) error {
parentIdx := s.currentIdx
it := node.Key()
@@ -297,26 +297,26 @@ func (s *SeenTracker) checkKeyValue(node *ast.Node) error {
value := node.Value()
switch value.Kind {
- case ast.InlineTable:
+ case unstable.InlineTable:
return s.checkInlineTable(value)
- case ast.Array:
+ case unstable.Array:
return s.checkArray(value)
}
return nil
}
-func (s *SeenTracker) checkArray(node *ast.Node) error {
+func (s *SeenTracker) checkArray(node *unstable.Node) error {
it := node.Children()
for it.Next() {
n := it.Node()
switch n.Kind {
- case ast.InlineTable:
+ case unstable.InlineTable:
err := s.checkInlineTable(n)
if err != nil {
return err
}
- case ast.Array:
+ case unstable.Array:
err := s.checkArray(n)
if err != nil {
return err
@@ -326,7 +326,7 @@ func (s *SeenTracker) checkArray(node *ast.Node) error {
return nil
}
-func (s *SeenTracker) checkInlineTable(node *ast.Node) error {
+func (s *SeenTracker) checkInlineTable(node *unstable.Node) error {
if pool.New == nil {
pool.New = func() interface{} {
return &SeenTracker{}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
index 30a31dcbd..a856bfdb0 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/localtime.go
@@ -4,6 +4,8 @@ import (
"fmt"
"strings"
"time"
+
+ "github.com/pelletier/go-toml/v2/unstable"
)
// LocalDate represents a calendar day in no specific timezone.
@@ -75,7 +77,7 @@ func (d LocalTime) MarshalText() ([]byte, error) {
func (d *LocalTime) UnmarshalText(b []byte) error {
res, left, err := parseLocalTime(b)
if err == nil && len(left) != 0 {
- err = newDecodeError(left, "extra characters")
+ err = unstable.NewParserError(left, "extra characters")
}
if err != nil {
return err
@@ -109,7 +111,7 @@ func (d LocalDateTime) MarshalText() ([]byte, error) {
func (d *LocalDateTime) UnmarshalText(data []byte) error {
res, left, err := parseLocalDateTime(data)
if err == nil && len(left) != 0 {
- err = newDecodeError(left, "extra characters")
+ err = unstable.NewParserError(left, "extra characters")
}
if err != nil {
return err
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
index acb288315..07aceb902 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -12,6 +12,8 @@ import (
"strings"
"time"
"unicode"
+
+ "github.com/pelletier/go-toml/v2/internal/characters"
)
// Marshal serializes a Go value as a TOML document.
@@ -437,7 +439,7 @@ func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byt
func needsQuoting(v string) bool {
// TODO: vectorize
for _, b := range []byte(v) {
- if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) {
+ if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) {
return true
}
}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
index b7830d139..802e7e4d1 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/strict.go
@@ -1,9 +1,9 @@
package toml
import (
- "github.com/pelletier/go-toml/v2/internal/ast"
"github.com/pelletier/go-toml/v2/internal/danger"
"github.com/pelletier/go-toml/v2/internal/tracker"
+ "github.com/pelletier/go-toml/v2/unstable"
)
type strict struct {
@@ -12,10 +12,10 @@ type strict struct {
// Tracks the current key being processed.
key tracker.KeyTracker
- missing []decodeError
+ missing []unstable.ParserError
}
-func (s *strict) EnterTable(node *ast.Node) {
+func (s *strict) EnterTable(node *unstable.Node) {
if !s.Enabled {
return
}
@@ -23,7 +23,7 @@ func (s *strict) EnterTable(node *ast.Node) {
s.key.UpdateTable(node)
}
-func (s *strict) EnterArrayTable(node *ast.Node) {
+func (s *strict) EnterArrayTable(node *unstable.Node) {
if !s.Enabled {
return
}
@@ -31,7 +31,7 @@ func (s *strict) EnterArrayTable(node *ast.Node) {
s.key.UpdateArrayTable(node)
}
-func (s *strict) EnterKeyValue(node *ast.Node) {
+func (s *strict) EnterKeyValue(node *unstable.Node) {
if !s.Enabled {
return
}
@@ -39,7 +39,7 @@ func (s *strict) EnterKeyValue(node *ast.Node) {
s.key.Push(node)
}
-func (s *strict) ExitKeyValue(node *ast.Node) {
+func (s *strict) ExitKeyValue(node *unstable.Node) {
if !s.Enabled {
return
}
@@ -47,27 +47,27 @@ func (s *strict) ExitKeyValue(node *ast.Node) {
s.key.Pop(node)
}
-func (s *strict) MissingTable(node *ast.Node) {
+func (s *strict) MissingTable(node *unstable.Node) {
if !s.Enabled {
return
}
- s.missing = append(s.missing, decodeError{
- highlight: keyLocation(node),
- message: "missing table",
- key: s.key.Key(),
+ s.missing = append(s.missing, unstable.ParserError{
+ Highlight: keyLocation(node),
+ Message: "missing table",
+ Key: s.key.Key(),
})
}
-func (s *strict) MissingField(node *ast.Node) {
+func (s *strict) MissingField(node *unstable.Node) {
if !s.Enabled {
return
}
- s.missing = append(s.missing, decodeError{
- highlight: keyLocation(node),
- message: "missing field",
- key: s.key.Key(),
+ s.missing = append(s.missing, unstable.ParserError{
+ Highlight: keyLocation(node),
+ Message: "missing field",
+ Key: s.key.Key(),
})
}
@@ -88,7 +88,7 @@ func (s *strict) Error(doc []byte) error {
return err
}
-func keyLocation(node *ast.Node) []byte {
+func keyLocation(node *unstable.Node) []byte {
k := node.Key()
hasOne := k.Next()
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
index 630a45466..3c6b8fe57 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/types.go
@@ -6,9 +6,9 @@ import (
"time"
)
-var timeType = reflect.TypeOf(time.Time{})
-var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
-var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
-var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
-var sliceInterfaceType = reflect.TypeOf([]interface{}{})
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil))
+var sliceInterfaceType = reflect.TypeOf([]interface{}(nil))
var stringType = reflect.TypeOf("")
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
index d0d7a72d0..70f6ec572 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
@@ -12,16 +12,16 @@ import (
"sync/atomic"
"time"
- "github.com/pelletier/go-toml/v2/internal/ast"
"github.com/pelletier/go-toml/v2/internal/danger"
"github.com/pelletier/go-toml/v2/internal/tracker"
+ "github.com/pelletier/go-toml/v2/unstable"
)
// Unmarshal deserializes a TOML document into a Go value.
//
// It is a shortcut for Decoder.Decode() with the default options.
func Unmarshal(data []byte, v interface{}) error {
- p := parser{}
+ p := unstable.Parser{}
p.Reset(data)
d := decoder{p: &p}
@@ -101,7 +101,7 @@ func (d *Decoder) Decode(v interface{}) error {
return fmt.Errorf("toml: %w", err)
}
- p := parser{}
+ p := unstable.Parser{}
p.Reset(b)
dec := decoder{
p: &p,
@@ -115,7 +115,7 @@ func (d *Decoder) Decode(v interface{}) error {
type decoder struct {
// Which parser instance in use for this decoding session.
- p *parser
+ p *unstable.Parser
// Flag indicating that the current expression is stashed.
// If set to true, calling nextExpr will not actually pull a new expression
@@ -157,7 +157,7 @@ func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target)
}
-func (d *decoder) expr() *ast.Node {
+func (d *decoder) expr() *unstable.Node {
return d.p.Expression()
}
@@ -208,12 +208,12 @@ func (d *decoder) FromParser(v interface{}) error {
err := d.fromParser(r)
if err == nil {
- return d.strict.Error(d.p.data)
+ return d.strict.Error(d.p.Data())
}
- var e *decodeError
+ var e *unstable.ParserError
if errors.As(err, &e) {
- return wrapDecodeError(d.p.data, e)
+ return wrapDecodeError(d.p.Data(), e)
}
return err
@@ -234,16 +234,16 @@ func (d *decoder) fromParser(root reflect.Value) error {
Rules for the unmarshal code:
- The stack is used to keep track of which values need to be set where.
-- handle* functions <=> switch on a given ast.Kind.
+- handle* functions <=> switch on a given unstable.Kind.
- unmarshalX* functions need to unmarshal a node of kind X.
- An "object" is either a struct or a map.
*/
-func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error {
+func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
var x reflect.Value
var err error
- if !(d.skipUntilTable && expr.Kind == ast.KeyValue) {
+ if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
err = d.seen.CheckExpression(expr)
if err != nil {
return err
@@ -251,16 +251,16 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error {
}
switch expr.Kind {
- case ast.KeyValue:
+ case unstable.KeyValue:
if d.skipUntilTable {
return nil
}
x, err = d.handleKeyValue(expr, v)
- case ast.Table:
+ case unstable.Table:
d.skipUntilTable = false
d.strict.EnterTable(expr)
x, err = d.handleTable(expr.Key(), v)
- case ast.ArrayTable:
+ case unstable.ArrayTable:
d.skipUntilTable = false
d.strict.EnterArrayTable(expr)
x, err = d.handleArrayTable(expr.Key(), v)
@@ -269,7 +269,7 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error {
}
if d.skipUntilTable {
- if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable {
+ if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable {
d.strict.MissingTable(expr)
}
} else if err == nil && x.IsValid() {
@@ -279,14 +279,14 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error {
return err
}
-func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
if key.Next() {
return d.handleArrayTablePart(key, v)
}
return d.handleKeyValues(v)
}
-func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Interface:
elem := v.Elem()
@@ -339,13 +339,13 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val
case reflect.Array:
idx := d.arrayIndex(true, v)
if idx >= v.Len() {
- return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx)
+ return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
}
elem := v.Index(idx)
_, err := d.handleArrayTable(key, elem)
return v, err
default:
- return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type())
+ return reflect.Value{}, d.typeMismatchError("array table", v.Type())
}
}
@@ -353,7 +353,7 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val
// evaluated like a normal key, but if it returns a collection, it also needs to
// point to the last element of the collection. Unless it is the last part of
// the key, then it needs to create a new element at the end.
-func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
if key.IsLast() {
return d.handleArrayTableCollectionLast(key, v)
}
@@ -390,7 +390,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value)
case reflect.Array:
idx := d.arrayIndex(false, v)
if idx >= v.Len() {
- return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx)
+ return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
}
elem := v.Index(idx)
_, err := d.handleArrayTable(key, elem)
@@ -400,7 +400,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value)
return d.handleArrayTable(key, v)
}
-func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) {
+func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) {
var rv reflect.Value
// First, dispatch over v to make sure it is a valid object.
@@ -518,7 +518,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle
// HandleArrayTablePart navigates the Go structure v using the key v. It is
// only used for the prefix (non-last) parts of an array-table. When
// encountering a collection, it should go to the last element.
-func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
var makeFn valueMakerFn
if key.IsLast() {
makeFn = makeSliceInterface
@@ -530,10 +530,10 @@ func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (refle
// HandleTable returns a reference when it has checked the next expression but
// cannot handle it.
-func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
if v.Kind() == reflect.Slice {
if v.Len() == 0 {
- return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice")
+ return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice")
}
elem := v.Index(v.Len() - 1)
x, err := d.handleTable(key, elem)
@@ -560,7 +560,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
var rv reflect.Value
for d.nextExpr() {
expr := d.expr()
- if expr.Kind != ast.KeyValue {
+ if expr.Kind != unstable.KeyValue {
// Stash the expression so that fromParser can just loop and use
// the right handler.
// We could just recurse ourselves here, but at least this gives a
@@ -587,7 +587,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
}
type (
- handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error)
+ handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error)
valueMakerFn func() reflect.Value
)
@@ -599,11 +599,11 @@ func makeSliceInterface() reflect.Value {
return reflect.MakeSlice(sliceInterfaceType, 0, 16)
}
-func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface)
}
-func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) {
+func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) {
// Special case for time, because we allow to unmarshal to it from
// different kind of AST nodes.
if v.Type() == timeType {
@@ -613,7 +613,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err
if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
if err != nil {
- return false, newDecodeError(d.p.Raw(node.Raw), "%w", err)
+ return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err)
}
return true, nil
@@ -622,7 +622,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err
return false, nil
}
-func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error {
+func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
for v.Kind() == reflect.Ptr {
v = initAndDereferencePointer(v)
}
@@ -633,32 +633,32 @@ func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error {
}
switch value.Kind {
- case ast.String:
+ case unstable.String:
return d.unmarshalString(value, v)
- case ast.Integer:
+ case unstable.Integer:
return d.unmarshalInteger(value, v)
- case ast.Float:
+ case unstable.Float:
return d.unmarshalFloat(value, v)
- case ast.Bool:
+ case unstable.Bool:
return d.unmarshalBool(value, v)
- case ast.DateTime:
+ case unstable.DateTime:
return d.unmarshalDateTime(value, v)
- case ast.LocalDate:
+ case unstable.LocalDate:
return d.unmarshalLocalDate(value, v)
- case ast.LocalTime:
+ case unstable.LocalTime:
return d.unmarshalLocalTime(value, v)
- case ast.LocalDateTime:
+ case unstable.LocalDateTime:
return d.unmarshalLocalDateTime(value, v)
- case ast.InlineTable:
+ case unstable.InlineTable:
return d.unmarshalInlineTable(value, v)
- case ast.Array:
+ case unstable.Array:
return d.unmarshalArray(value, v)
default:
panic(fmt.Errorf("handleValue not implemented for %s", value.Kind))
}
}
-func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error {
switch v.Kind() {
case reflect.Slice:
if v.IsNil() {
@@ -729,7 +729,7 @@ func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error {
return nil
}
-func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error {
// Make sure v is an initialized object.
switch v.Kind() {
case reflect.Map:
@@ -746,7 +746,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error
}
return d.unmarshalInlineTable(itable, elem)
default:
- return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind())
+ return unstable.NewParserError(itable.Data, "cannot store inline table in Go type %s", v.Kind())
}
it := itable.Children()
@@ -765,7 +765,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error
return nil
}
-func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error {
dt, err := parseDateTime(value.Data)
if err != nil {
return err
@@ -775,7 +775,7 @@ func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error {
return nil
}
-func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error {
ld, err := parseLocalDate(value.Data)
if err != nil {
return err
@@ -792,28 +792,28 @@ func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error {
return nil
}
-func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error {
lt, rest, err := parseLocalTime(value.Data)
if err != nil {
return err
}
if len(rest) > 0 {
- return newDecodeError(rest, "extra characters at the end of a local time")
+ return unstable.NewParserError(rest, "extra characters at the end of a local time")
}
v.Set(reflect.ValueOf(lt))
return nil
}
-func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error {
ldt, rest, err := parseLocalDateTime(value.Data)
if err != nil {
return err
}
if len(rest) > 0 {
- return newDecodeError(rest, "extra characters at the end of a local date time")
+ return unstable.NewParserError(rest, "extra characters at the end of a local date time")
}
if v.Type() == timeType {
@@ -828,7 +828,7 @@ func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error
return nil
}
-func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error {
b := value.Data[0] == 't'
switch v.Kind() {
@@ -837,13 +837,13 @@ func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error {
case reflect.Interface:
v.Set(reflect.ValueOf(b))
default:
- return newDecodeError(value.Data, "cannot assign boolean to a %t", b)
+ return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b)
}
return nil
}
-func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error {
f, err := parseFloat(value.Data)
if err != nil {
return err
@@ -854,13 +854,13 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
v.SetFloat(f)
case reflect.Float32:
if f > math.MaxFloat32 {
- return newDecodeError(value.Data, "number %f does not fit in a float32", f)
+ return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f)
}
v.SetFloat(f)
case reflect.Interface:
v.Set(reflect.ValueOf(f))
default:
- return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind())
+ return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind())
}
return nil
@@ -886,7 +886,7 @@ func init() {
}
}
-func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error {
i, err := parseInteger(value.Data)
if err != nil {
return err
@@ -967,20 +967,20 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
return nil
}
-func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error {
+func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
switch v.Kind() {
case reflect.String:
v.SetString(string(value.Data))
case reflect.Interface:
v.Set(reflect.ValueOf(string(value.Data)))
default:
- return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind())
+ return unstable.NewParserError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind())
}
return nil
}
-func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) {
d.strict.EnterKeyValue(expr)
v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v)
@@ -994,7 +994,7 @@ func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value
return v, err
}
-func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
if key.Next() {
// Still scoping the key
return d.handleKeyValuePart(key, value, v)
@@ -1004,7 +1004,7 @@ func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v refle
return reflect.Value{}, d.handleValue(value, v)
}
-func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) {
+func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
// contains the replacement for v
var rv reflect.Value
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
similarity index 60%
rename from test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go
rename to test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
index 9dec2e000..b60d9bfd6 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
@@ -1,4 +1,4 @@
-package ast
+package unstable
import (
"fmt"
@@ -7,13 +7,16 @@ import (
"github.com/pelletier/go-toml/v2/internal/danger"
)
-// Iterator starts uninitialized, you need to call Next() first.
+// Iterator over a sequence of nodes.
+//
+// Starts uninitialized, you need to call Next() first.
//
// For example:
//
// it := n.Children()
// for it.Next() {
-// it.Node()
+// n := it.Node()
+// // do something with n
// }
type Iterator struct {
started bool
@@ -32,42 +35,31 @@ func (c *Iterator) Next() bool {
}
// IsLast returns true if the current node of the iterator is the last
-// one. Subsequent call to Next() will return false.
+// one. Subsequent calls to Next() will return false.
func (c *Iterator) IsLast() bool {
return c.node.next == 0
}
-// Node returns a copy of the node pointed at by the iterator.
+// Node returns a pointer to the node pointed at by the iterator.
func (c *Iterator) Node() *Node {
return c.node
}
-// Root contains a full AST.
+// Node in a TOML expression AST.
//
-// It is immutable once constructed with Builder.
-type Root struct {
- nodes []Node
-}
-
-// Iterator over the top level nodes.
-func (r *Root) Iterator() Iterator {
- it := Iterator{}
- if len(r.nodes) > 0 {
- it.node = &r.nodes[0]
- }
- return it
-}
-
-func (r *Root) at(idx Reference) *Node {
- return &r.nodes[idx]
-}
-
-// Arrays have one child per element in the array. InlineTables have
-// one child per key-value pair in the table. KeyValues have at least
-// two children. The first one is the value. The rest make a
-// potentially dotted key. Table and Array table have one child per
-// element of the key they represent (same as KeyValue, but without
-// the last node being the value).
+// Depending on Kind, its sequence of children should be interpreted
+// differently.
+//
+// - Array have one child per element in the array.
+// - InlineTable have one child per key-value in the table (each of kind
+// InlineTable).
+// - KeyValue have at least two children. The first one is the value. The rest
+// make a potentially dotted key.
+// - Table and ArrayTable's children represent a dotted key (same as
+// KeyValue, but without the first node being the value).
+//
+// When relevant, Raw describes the range of bytes this node is refering to in
+// the input document. Use Parser.Raw() to retrieve the actual bytes.
type Node struct {
Kind Kind
Raw Range // Raw bytes from the input.
@@ -80,13 +72,13 @@ type Node struct {
child int // 0 if no child
}
+// Range of bytes in the document.
type Range struct {
Offset uint32
Length uint32
}
-// Next returns a copy of the next node, or an invalid Node if there
-// is no next node.
+// Next returns a pointer to the next node, or nil if there is no next node.
func (n *Node) Next() *Node {
if n.next == 0 {
return nil
@@ -96,9 +88,9 @@ func (n *Node) Next() *Node {
return (*Node)(danger.Stride(ptr, size, n.next))
}
-// Child returns a copy of the first child node of this node. Other
-// children can be accessed calling Next on the first child. Returns
-// an invalid Node if there is none.
+// Child returns a pointer to the first child node of this node. Other children
+// can be accessed calling Next on the first child. Returns an nil if this Node
+// has no child.
func (n *Node) Child() *Node {
if n.child == 0 {
return nil
@@ -113,9 +105,9 @@ func (n *Node) Valid() bool {
return n != nil
}
-// Key returns the child nodes making the Key on a supported
-// node. Panics otherwise. They are guaranteed to be all be of the
-// Kind Key. A simple key would return just one element.
+// Key returns the children nodes making the Key on a supported node. Panics
+// otherwise. They are guaranteed to be all be of the Kind Key. A simple key
+// would return just one element.
func (n *Node) Key() Iterator {
switch n.Kind {
case KeyValue:
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
new file mode 100644
index 000000000..9538e30df
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
@@ -0,0 +1,71 @@
+package unstable
+
+// root contains a full AST.
+//
+// It is immutable once constructed with Builder.
+type root struct {
+ nodes []Node
+}
+
+// Iterator over the top level nodes.
+func (r *root) Iterator() Iterator {
+ it := Iterator{}
+ if len(r.nodes) > 0 {
+ it.node = &r.nodes[0]
+ }
+ return it
+}
+
+func (r *root) at(idx reference) *Node {
+ return &r.nodes[idx]
+}
+
+type reference int
+
+const invalidReference reference = -1
+
+func (r reference) Valid() bool {
+ return r != invalidReference
+}
+
+type builder struct {
+ tree root
+ lastIdx int
+}
+
+func (b *builder) Tree() *root {
+ return &b.tree
+}
+
+func (b *builder) NodeAt(ref reference) *Node {
+ return b.tree.at(ref)
+}
+
+func (b *builder) Reset() {
+ b.tree.nodes = b.tree.nodes[:0]
+ b.lastIdx = 0
+}
+
+func (b *builder) Push(n Node) reference {
+ b.lastIdx = len(b.tree.nodes)
+ b.tree.nodes = append(b.tree.nodes, n)
+ return reference(b.lastIdx)
+}
+
+func (b *builder) PushAndChain(n Node) reference {
+ newIdx := len(b.tree.nodes)
+ b.tree.nodes = append(b.tree.nodes, n)
+ if b.lastIdx >= 0 {
+ b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
+ }
+ b.lastIdx = newIdx
+ return reference(b.lastIdx)
+}
+
+func (b *builder) AttachChild(parent reference, child reference) {
+ b.tree.nodes[parent].child = int(child) - int(parent)
+}
+
+func (b *builder) Chain(from reference, to reference) {
+ b.tree.nodes[from].next = int(to) - int(from)
+}
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
new file mode 100644
index 000000000..7ff26c53c
--- /dev/null
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
@@ -0,0 +1,3 @@
+// Package unstable provides APIs that do not meet the backward compatibility
+// guarantees yet.
+package unstable
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
similarity index 81%
rename from test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go
rename to test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
index 2b50c67fc..ff9df1bef 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
@@ -1,25 +1,26 @@
-package ast
+package unstable
import "fmt"
+// Kind represents the type of TOML structure contained in a given Node.
type Kind int
const (
- // meta
+ // Meta
Invalid Kind = iota
Comment
Key
- // top level structures
+ // Top level structures
Table
ArrayTable
KeyValue
- // containers values
+ // Containers values
Array
InlineTable
- // values
+ // Values
String
Bool
Float
@@ -30,6 +31,7 @@ const (
DateTime
)
+// String implementation of fmt.Stringer.
func (k Kind) String() string {
switch k {
case Invalid:
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
similarity index 70%
rename from test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go
rename to test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
index 9859a795b..52db88e7a 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/parser.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
@@ -1,50 +1,108 @@
-package toml
+package unstable
import (
"bytes"
+ "fmt"
"unicode"
- "github.com/pelletier/go-toml/v2/internal/ast"
+ "github.com/pelletier/go-toml/v2/internal/characters"
"github.com/pelletier/go-toml/v2/internal/danger"
)
-type parser struct {
- builder ast.Builder
- ref ast.Reference
+// ParserError describes an error relative to the content of the document.
+//
+// It cannot outlive the instance of Parser it refers to, and may cause panics
+// if the parser is reset.
+type ParserError struct {
+ Highlight []byte
+ Message string
+ Key []string // optional
+}
+
+// Error is the implementation of the error interface.
+func (e *ParserError) Error() string {
+ return e.Message
+}
+
+// NewParserError is a convenience function to create a ParserError
+//
+// Warning: Highlight needs to be a subslice of Parser.data, so only slices
+// returned by Parser.Raw are valid candidates.
+func NewParserError(highlight []byte, format string, args ...interface{}) error {
+ return &ParserError{
+ Highlight: highlight,
+ Message: fmt.Errorf(format, args...).Error(),
+ }
+}
+
+// Parser scans over a TOML-encoded document and generates an iterative AST.
+//
+// To prime the Parser, first reset it with the contents of a TOML document.
+// Then, process all top-level expressions sequentially. See Example.
+//
+// Don't forget to check Error() after you're done parsing.
+//
+// Each top-level expression needs to be fully processed before calling
+// NextExpression() again. Otherwise, calls to various Node methods may panic if
+// the parser has moved on the next expression.
+//
+// For performance reasons, go-toml doesn't make a copy of the input bytes to
+// the parser. Make sure to copy all the bytes you need to outlive the slice
+// given to the parser.
+//
+// The parser doesn't provide nodes for comments yet, nor for whitespace.
+type Parser struct {
data []byte
+ builder builder
+ ref reference
left []byte
err error
first bool
}
-func (p *parser) Range(b []byte) ast.Range {
- return ast.Range{
+// Data returns the slice provided to the last call to Reset.
+func (p *Parser) Data() []byte {
+ return p.data
+}
+
+// Range returns a range description that corresponds to a given slice of the
+// input. If the argument is not a subslice of the parser input, this function
+// panics.
+func (p *Parser) Range(b []byte) Range {
+ return Range{
Offset: uint32(danger.SubsliceOffset(p.data, b)),
Length: uint32(len(b)),
}
}
-func (p *parser) Raw(raw ast.Range) []byte {
+// Raw returns the slice corresponding to the bytes in the given range.
+func (p *Parser) Raw(raw Range) []byte {
return p.data[raw.Offset : raw.Offset+raw.Length]
}
-func (p *parser) Reset(b []byte) {
+// Reset brings the parser to its initial state for a given input. It wipes an
+// reuses internal storage to reduce allocation.
+func (p *Parser) Reset(b []byte) {
p.builder.Reset()
- p.ref = ast.InvalidReference
+ p.ref = invalidReference
p.data = b
p.left = b
p.err = nil
p.first = true
}
-//nolint:cyclop
-func (p *parser) NextExpression() bool {
+// NextExpression parses the next top-level expression. If an expression was
+// successfully parsed, it returns true. If the parser is at the end of the
+// document or an error occurred, it returns false.
+//
+// Retrieve the parsed expression with Expression().
+func (p *Parser) NextExpression() bool {
if len(p.left) == 0 || p.err != nil {
return false
}
p.builder.Reset()
- p.ref = ast.InvalidReference
+ p.ref = invalidReference
for {
if len(p.left) == 0 || p.err != nil {
@@ -73,15 +131,18 @@ func (p *parser) NextExpression() bool {
}
}
-func (p *parser) Expression() *ast.Node {
+// Expression returns a pointer to the node representing the last successfully
+// parsed expression.
+func (p *Parser) Expression() *Node {
return p.builder.NodeAt(p.ref)
}
-func (p *parser) Error() error {
+// Error returns any error that has occurred during parsing.
+func (p *Parser) Error() error {
return p.err
}
-func (p *parser) parseNewline(b []byte) ([]byte, error) {
+func (p *Parser) parseNewline(b []byte) ([]byte, error) {
if b[0] == '\n' {
return b[1:], nil
}
@@ -91,14 +152,14 @@ func (p *parser) parseNewline(b []byte) ([]byte, error) {
return rest, err
}
- return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0])
+ return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0])
}
-func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseExpression(b []byte) (reference, []byte, error) {
// expression = ws [ comment ]
// expression =/ ws keyval ws [ comment ]
// expression =/ ws table ws [ comment ]
- ref := ast.InvalidReference
+ ref := invalidReference
b = p.parseWhitespace(b)
@@ -136,7 +197,7 @@ func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) {
return ref, b, nil
}
-func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseTable(b []byte) (reference, []byte, error) {
// table = std-table / array-table
if len(b) > 1 && b[1] == '[' {
return p.parseArrayTable(b)
@@ -145,12 +206,12 @@ func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) {
return p.parseStdTable(b)
}
-func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) {
// array-table = array-table-open key array-table-close
// array-table-open = %x5B.5B ws ; [[ Double left square bracket
// array-table-close = ws %x5D.5D ; ]] Double right square bracket
- ref := p.builder.Push(ast.Node{
- Kind: ast.ArrayTable,
+ ref := p.builder.Push(Node{
+ Kind: ArrayTable,
})
b = b[2:]
@@ -174,12 +235,12 @@ func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) {
return ref, b, err
}
-func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) {
// std-table = std-table-open key std-table-close
// std-table-open = %x5B ws ; [ Left square bracket
// std-table-close = ws %x5D ; ] Right square bracket
- ref := p.builder.Push(ast.Node{
- Kind: ast.Table,
+ ref := p.builder.Push(Node{
+ Kind: Table,
})
b = b[1:]
@@ -199,15 +260,15 @@ func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) {
return ref, b, err
}
-func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) {
// keyval = key keyval-sep val
- ref := p.builder.Push(ast.Node{
- Kind: ast.KeyValue,
+ ref := p.builder.Push(Node{
+ Kind: KeyValue,
})
key, b, err := p.parseKey(b)
if err != nil {
- return ast.InvalidReference, nil, err
+ return invalidReference, nil, err
}
// keyval-sep = ws %x3D ws ; =
@@ -215,12 +276,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) {
b = p.parseWhitespace(b)
if len(b) == 0 {
- return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there")
+ return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there")
}
b, err = expect('=', b)
if err != nil {
- return ast.InvalidReference, nil, err
+ return invalidReference, nil, err
}
b = p.parseWhitespace(b)
@@ -237,12 +298,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) {
}
//nolint:cyclop,funlen
-func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseVal(b []byte) (reference, []byte, error) {
// val = string / boolean / array / inline-table / date-time / float / integer
- ref := ast.InvalidReference
+ ref := invalidReference
if len(b) == 0 {
- return ref, nil, newDecodeError(b, "expected value, not eof")
+ return ref, nil, NewParserError(b, "expected value, not eof")
}
var err error
@@ -259,8 +320,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) {
}
if err == nil {
- ref = p.builder.Push(ast.Node{
- Kind: ast.String,
+ ref = p.builder.Push(Node{
+ Kind: String,
Raw: p.Range(raw),
Data: v,
})
@@ -277,8 +338,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) {
}
if err == nil {
- ref = p.builder.Push(ast.Node{
- Kind: ast.String,
+ ref = p.builder.Push(Node{
+ Kind: String,
Raw: p.Range(raw),
Data: v,
})
@@ -287,22 +348,22 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) {
return ref, b, err
case 't':
if !scanFollowsTrue(b) {
- return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'")
+ return ref, nil, NewParserError(atmost(b, 4), "expected 'true'")
}
- ref = p.builder.Push(ast.Node{
- Kind: ast.Bool,
+ ref = p.builder.Push(Node{
+ Kind: Bool,
Data: b[:4],
})
return ref, b[4:], nil
case 'f':
if !scanFollowsFalse(b) {
- return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'")
+ return ref, nil, NewParserError(atmost(b, 5), "expected 'false'")
}
- ref = p.builder.Push(ast.Node{
- Kind: ast.Bool,
+ ref = p.builder.Push(Node{
+ Kind: Bool,
Data: b[:5],
})
@@ -324,7 +385,7 @@ func atmost(b []byte, n int) []byte {
return b[:n]
}
-func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
+func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
v, rest, err := scanLiteralString(b)
if err != nil {
return nil, nil, nil, err
@@ -333,19 +394,19 @@ func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) {
return v, v[1 : len(v)-1], rest, nil
}
-func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) {
// inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
// inline-table-open = %x7B ws ; {
// inline-table-close = ws %x7D ; }
// inline-table-sep = ws %x2C ws ; , Comma
// inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
- parent := p.builder.Push(ast.Node{
- Kind: ast.InlineTable,
+ parent := p.builder.Push(Node{
+ Kind: InlineTable,
})
first := true
- var child ast.Reference
+ var child reference
b = b[1:]
@@ -356,7 +417,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) {
b = p.parseWhitespace(b)
if len(b) == 0 {
- return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete")
+ return parent, nil, NewParserError(previousB[:1], "inline table is incomplete")
}
if b[0] == '}' {
@@ -371,7 +432,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) {
b = p.parseWhitespace(b)
}
- var kv ast.Reference
+ var kv reference
kv, b, err = p.parseKeyval(b)
if err != nil {
@@ -394,7 +455,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) {
}
//nolint:funlen,cyclop
-func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseValArray(b []byte) (reference, []byte, error) {
// array = array-open [ array-values ] ws-comment-newline array-close
// array-open = %x5B ; [
// array-close = %x5D ; ]
@@ -405,13 +466,13 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
arrayStart := b
b = b[1:]
- parent := p.builder.Push(ast.Node{
- Kind: ast.Array,
+ parent := p.builder.Push(Node{
+ Kind: Array,
})
first := true
- var lastChild ast.Reference
+ var lastChild reference
var err error
for len(b) > 0 {
@@ -421,7 +482,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
}
if len(b) == 0 {
- return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete")
+ return parent, nil, NewParserError(arrayStart[:1], "array is incomplete")
}
if b[0] == ']' {
@@ -430,7 +491,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
if b[0] == ',' {
if first {
- return parent, nil, newDecodeError(b[0:1], "array cannot start with comma")
+ return parent, nil, NewParserError(b[0:1], "array cannot start with comma")
}
b = b[1:]
@@ -439,7 +500,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
return parent, nil, err
}
} else if !first {
- return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas")
+ return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas")
}
// TOML allows trailing commas in arrays.
@@ -447,7 +508,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
break
}
- var valueRef ast.Reference
+ var valueRef reference
valueRef, b, err = p.parseVal(b)
if err != nil {
return parent, nil, err
@@ -472,7 +533,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) {
return parent, rest, err
}
-func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) {
+func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) {
for len(b) > 0 {
var err error
b = p.parseWhitespace(b)
@@ -501,7 +562,7 @@ func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error)
return b, nil
}
-func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) {
+func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) {
token, rest, err := scanMultilineLiteralString(b)
if err != nil {
return nil, nil, nil, err
@@ -520,7 +581,7 @@ func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte,
}
//nolint:funlen,gocognit,cyclop
-func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) {
+func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) {
// ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
// ml-basic-string-delim
// ml-basic-string-delim = 3quotation-mark
@@ -551,11 +612,11 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
if !escaped {
str := token[startIdx:endIdx]
- verr := utf8TomlValidAlreadyEscaped(str)
+ verr := characters.Utf8TomlValidAlreadyEscaped(str)
if verr.Zero() {
return token, str, rest, nil
}
- return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
+ return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
}
var builder bytes.Buffer
@@ -635,13 +696,13 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
builder.WriteRune(x)
i += 8
default:
- return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c)
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
}
i++
} else {
- size := utf8ValidNext(token[i:])
+ size := characters.Utf8ValidNext(token[i:])
if size == 0 {
- return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c)
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
}
builder.Write(token[i : i+size])
i += size
@@ -651,7 +712,7 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er
return token, builder.Bytes(), rest, nil
}
-func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseKey(b []byte) (reference, []byte, error) {
// key = simple-key / dotted-key
// simple-key = quoted-key / unquoted-key
//
@@ -662,11 +723,11 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
// dot-sep = ws %x2E ws ; . Period
raw, key, b, err := p.parseSimpleKey(b)
if err != nil {
- return ast.InvalidReference, nil, err
+ return invalidReference, nil, err
}
- ref := p.builder.Push(ast.Node{
- Kind: ast.Key,
+ ref := p.builder.Push(Node{
+ Kind: Key,
Raw: p.Range(raw),
Data: key,
})
@@ -681,8 +742,8 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
return ref, nil, err
}
- p.builder.PushAndChain(ast.Node{
- Kind: ast.Key,
+ p.builder.PushAndChain(Node{
+ Kind: Key,
Raw: p.Range(raw),
Data: key,
})
@@ -694,9 +755,9 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) {
return ref, b, nil
}
-func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
+func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
if len(b) == 0 {
- return nil, nil, nil, newDecodeError(b, "expected key but found none")
+ return nil, nil, nil, NewParserError(b, "expected key but found none")
}
// simple-key = quoted-key / unquoted-key
@@ -711,12 +772,12 @@ func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) {
key, rest = scanUnquotedKey(b)
return key, key, rest, nil
default:
- return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0])
+ return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0])
}
}
//nolint:funlen,cyclop
-func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
+func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
// basic-string = quotation-mark *basic-char quotation-mark
// quotation-mark = %x22 ; "
// basic-char = basic-unescaped / escaped
@@ -744,11 +805,11 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
// validate the string and return a direct reference to the buffer.
if !escaped {
str := token[startIdx:endIdx]
- verr := utf8TomlValidAlreadyEscaped(str)
+ verr := characters.Utf8TomlValidAlreadyEscaped(str)
if verr.Zero() {
return token, str, rest, nil
}
- return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
+ return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8")
}
i := startIdx
@@ -795,13 +856,13 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
builder.WriteRune(x)
i += 8
default:
- return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c)
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c)
}
i++
} else {
- size := utf8ValidNext(token[i:])
+ size := characters.Utf8ValidNext(token[i:])
if size == 0 {
- return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c)
+ return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c)
}
builder.Write(token[i : i+size])
i += size
@@ -813,7 +874,7 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) {
func hexToRune(b []byte, length int) (rune, error) {
if len(b) < length {
- return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b))
+ return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b))
}
b = b[:length]
@@ -828,19 +889,19 @@ func hexToRune(b []byte, length int) (rune, error) {
case 'A' <= c && c <= 'F':
d = uint32(c - 'A' + 10)
default:
- return -1, newDecodeError(b[i:i+1], "non-hex character")
+ return -1, NewParserError(b[i:i+1], "non-hex character")
}
r = r*16 + d
}
if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 {
- return -1, newDecodeError(b, "escape sequence is invalid Unicode code point")
+ return -1, NewParserError(b, "escape sequence is invalid Unicode code point")
}
return rune(r), nil
}
-func (p *parser) parseWhitespace(b []byte) []byte {
+func (p *Parser) parseWhitespace(b []byte) []byte {
// ws = *wschar
// wschar = %x20 ; Space
// wschar =/ %x09 ; Horizontal tab
@@ -850,24 +911,24 @@ func (p *parser) parseWhitespace(b []byte) []byte {
}
//nolint:cyclop
-func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) {
switch b[0] {
case 'i':
if !scanFollowsInf(b) {
- return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'")
+ return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'")
}
- return p.builder.Push(ast.Node{
- Kind: ast.Float,
+ return p.builder.Push(Node{
+ Kind: Float,
Data: b[:3],
}), b[3:], nil
case 'n':
if !scanFollowsNan(b) {
- return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'")
+ return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'")
}
- return p.builder.Push(ast.Node{
- Kind: ast.Float,
+ return p.builder.Push(Node{
+ Kind: Float,
Data: b[:3],
}), b[3:], nil
case '+', '-':
@@ -898,7 +959,7 @@ func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, err
return p.scanIntOrFloat(b)
}
-func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) {
// scans for contiguous characters in [0-9T:Z.+-], and up to one space if
// followed by a digit.
hasDate := false
@@ -941,30 +1002,30 @@ byteLoop:
}
}
- var kind ast.Kind
+ var kind Kind
if hasTime {
if hasDate {
if hasTz {
- kind = ast.DateTime
+ kind = DateTime
} else {
- kind = ast.LocalDateTime
+ kind = LocalDateTime
}
} else {
- kind = ast.LocalTime
+ kind = LocalTime
}
} else {
- kind = ast.LocalDate
+ kind = LocalDate
}
- return p.builder.Push(ast.Node{
+ return p.builder.Push(Node{
Kind: kind,
Data: b[:i],
}), b[i:], nil
}
//nolint:funlen,gocognit,cyclop
-func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) {
+func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
i := 0
if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' {
@@ -990,8 +1051,8 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) {
}
}
- return p.builder.Push(ast.Node{
- Kind: ast.Integer,
+ return p.builder.Push(Node{
+ Kind: Integer,
Data: b[:i],
}), b[i:], nil
}
@@ -1013,40 +1074,40 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) {
if c == 'i' {
if scanFollowsInf(b[i:]) {
- return p.builder.Push(ast.Node{
- Kind: ast.Float,
+ return p.builder.Push(Node{
+ Kind: Float,
Data: b[:i+3],
}), b[i+3:], nil
}
- return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number")
+ return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number")
}
if c == 'n' {
if scanFollowsNan(b[i:]) {
- return p.builder.Push(ast.Node{
- Kind: ast.Float,
+ return p.builder.Push(Node{
+ Kind: Float,
Data: b[:i+3],
}), b[i+3:], nil
}
- return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number")
+ return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number")
}
break
}
if i == 0 {
- return ast.InvalidReference, b, newDecodeError(b, "incomplete number")
+ return invalidReference, b, NewParserError(b, "incomplete number")
}
- kind := ast.Integer
+ kind := Integer
if isFloat {
- kind = ast.Float
+ kind = Float
}
- return p.builder.Push(ast.Node{
+ return p.builder.Push(Node{
Kind: kind,
Data: b[:i],
}), b[i:], nil
@@ -1075,11 +1136,11 @@ func isValidBinaryRune(r byte) bool {
func expect(x byte, b []byte) ([]byte, error) {
if len(b) == 0 {
- return nil, newDecodeError(b, "expected character %c but the document ended here", x)
+ return nil, NewParserError(b, "expected character %c but the document ended here", x)
}
if b[0] != x {
- return nil, newDecodeError(b[0:1], "expected character %c", x)
+ return nil, NewParserError(b[0:1], "expected character %c", x)
}
return b[1:], nil
diff --git a/test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
similarity index 79%
rename from test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go
rename to test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
index bb445fab4..af22ebbe9 100644
--- a/test/integration/vendor/github.com/pelletier/go-toml/v2/scanner.go
+++ b/test/integration/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
@@ -1,4 +1,6 @@
-package toml
+package unstable
+
+import "github.com/pelletier/go-toml/v2/internal/characters"
func scanFollows(b []byte, pattern string) bool {
n := len(pattern)
@@ -54,16 +56,16 @@ func scanLiteralString(b []byte) ([]byte, []byte, error) {
case '\'':
return b[:i+1], b[i+1:], nil
case '\n', '\r':
- return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines")
+ return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines")
}
- size := utf8ValidNext(b[i:])
+ size := characters.Utf8ValidNext(b[i:])
if size == 0 {
- return nil, nil, newDecodeError(b[i:i+1], "invalid character")
+ return nil, nil, NewParserError(b[i:i+1], "invalid character")
}
i += size
}
- return nil, nil, newDecodeError(b[len(b):], "unterminated literal string")
+ return nil, nil, NewParserError(b[len(b):], "unterminated literal string")
}
func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
@@ -98,39 +100,39 @@ func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
i++
if i < len(b) && b[i] == '\'' {
- return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string")
+ return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string")
}
return b[:i], b[i:], nil
}
case '\r':
if len(b) < i+2 {
- return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`)
+ return nil, nil, NewParserError(b[len(b):], `need a \n after \r`)
}
if b[i+1] != '\n' {
- return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
+ return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`)
}
i += 2 // skip the \n
continue
}
- size := utf8ValidNext(b[i:])
+ size := characters.Utf8ValidNext(b[i:])
if size == 0 {
- return nil, nil, newDecodeError(b[i:i+1], "invalid character")
+ return nil, nil, NewParserError(b[i:i+1], "invalid character")
}
i += size
}
- return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`)
+ return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`)
}
func scanWindowsNewline(b []byte) ([]byte, []byte, error) {
const lenCRLF = 2
if len(b) < lenCRLF {
- return nil, nil, newDecodeError(b, "windows new line expected")
+ return nil, nil, NewParserError(b, "windows new line expected")
}
if b[1] != '\n' {
- return nil, nil, newDecodeError(b, `windows new line should be \r\n`)
+ return nil, nil, NewParserError(b, `windows new line should be \r\n`)
}
return b[:lenCRLF], b[lenCRLF:], nil
@@ -165,11 +167,11 @@ func scanComment(b []byte) ([]byte, []byte, error) {
if i+1 < len(b) && b[i+1] == '\n' {
return b[:i+1], b[i+1:], nil
}
- return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment")
+ return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
}
- size := utf8ValidNext(b[i:])
+ size := characters.Utf8ValidNext(b[i:])
if size == 0 {
- return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment")
+ return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
}
i += size
@@ -192,17 +194,17 @@ func scanBasicString(b []byte) ([]byte, bool, []byte, error) {
case '"':
return b[:i+1], escaped, b[i+1:], nil
case '\n', '\r':
- return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines")
+ return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines")
case '\\':
if len(b) < i+2 {
- return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\")
+ return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\")
}
escaped = true
i++ // skip the next character
}
}
- return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`)
+ return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`)
}
func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
@@ -243,27 +245,27 @@ func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
i++
if i < len(b) && b[i] == '"' {
- return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`)
+ return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`)
}
return b[:i], escaped, b[i:], nil
}
case '\\':
if len(b) < i+2 {
- return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\")
+ return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\")
}
escaped = true
i++ // skip the next character
case '\r':
if len(b) < i+2 {
- return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`)
+ return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`)
}
if b[i+1] != '\n' {
- return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`)
+ return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`)
}
i++ // skip the \n
}
}
- return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`)
+ return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`)
}
diff --git a/test/integration/vendor/github.com/spf13/afero/memmap.go b/test/integration/vendor/github.com/spf13/afero/memmap.go
index ea0798d87..d06975e71 100644
--- a/test/integration/vendor/github.com/spf13/afero/memmap.go
+++ b/test/integration/vendor/github.com/spf13/afero/memmap.go
@@ -142,6 +142,11 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
}
m.mu.Lock()
+ // Double check that it doesn't exist.
+ if _, ok := m.getData()[name]; ok {
+ m.mu.Unlock()
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
item := mem.CreateDir(name)
mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
diff --git a/test/integration/vendor/github.com/spf13/viper/Makefile b/test/integration/vendor/github.com/spf13/viper/Makefile
index 130c427e8..3f4234d33 100644
--- a/test/integration/vendor/github.com/spf13/viper/Makefile
+++ b/test/integration/vendor/github.com/spf13/viper/Makefile
@@ -16,7 +16,7 @@ endif
# Dependency versions
GOTESTSUM_VERSION = 1.8.0
-GOLANGCI_VERSION = 1.49.0
+GOLANGCI_VERSION = 1.50.1
# Add the ability to override some variables
# Use with care
diff --git a/test/integration/vendor/github.com/spf13/viper/README.md b/test/integration/vendor/github.com/spf13/viper/README.md
index 63413a7dc..cd3929052 100644
--- a/test/integration/vendor/github.com/spf13/viper/README.md
+++ b/test/integration/vendor/github.com/spf13/viper/README.md
@@ -8,7 +8,7 @@
[](https://github.com/avelino/awesome-go#configuration)
[](https://repl.it/@sagikazarmark/Viper-example#main.go)
-[](https://github.com/spf13/viper/actions?query=workflow%3ACI)
+[](https://github.com/spf13/viper/actions?query=workflow%3ACI)
[](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://goreportcard.com/report/github.com/spf13/viper)

@@ -40,8 +40,8 @@ go get github.com/spf13/viper
## What is Viper?
-Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
-to work within an application, and can handle all types of configuration needs
+Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors).
+It is designed to work within an application, and can handle all types of configuration needs
and formats. It supports:
* setting defaults
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
index 45fddc8b5..a993c5994 100644
--- a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
@@ -1,39 +1,16 @@
-//go:build viper_toml1
-// +build viper_toml1
-
package toml
import (
- "github.com/pelletier/go-toml"
+ "github.com/pelletier/go-toml/v2"
)
// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding.
type Codec struct{}
func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
- t, err := toml.TreeFromMap(v)
- if err != nil {
- return nil, err
- }
-
- s, err := t.ToTomlString()
- if err != nil {
- return nil, err
- }
-
- return []byte(s), nil
+ return toml.Marshal(v)
}
func (Codec) Decode(b []byte, v map[string]interface{}) error {
- tree, err := toml.LoadBytes(b)
- if err != nil {
- return err
- }
-
- tmap := tree.ToMap()
- for key, value := range tmap {
- v[key] = value
- }
-
- return nil
+ return toml.Unmarshal(b, &v)
}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
deleted file mode 100644
index 112c6d372..000000000
--- a/test/integration/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build !viper_toml1
-// +build !viper_toml1
-
-package toml
-
-import (
- "github.com/pelletier/go-toml/v2"
-)
-
-// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding.
-type Codec struct{}
-
-func (Codec) Encode(v map[string]interface{}) ([]byte, error) {
- return toml.Marshal(v)
-}
-
-func (Codec) Decode(b []byte, v map[string]interface{}) error {
- return toml.Unmarshal(b, &v)
-}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
index 24cc19dfc..82dc136a3 100644
--- a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
+++ b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go
@@ -1,6 +1,6 @@
package yaml
-// import "gopkg.in/yaml.v2"
+import "gopkg.in/yaml.v3"
// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding.
type Codec struct{}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
deleted file mode 100644
index 4c398c2f4..000000000
--- a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build viper_yaml2
-// +build viper_yaml2
-
-package yaml
-
-import yamlv2 "gopkg.in/yaml.v2"
-
-var yaml = struct {
- Marshal func(in interface{}) (out []byte, err error)
- Unmarshal func(in []byte, out interface{}) (err error)
-}{
- Marshal: yamlv2.Marshal,
- Unmarshal: yamlv2.Unmarshal,
-}
diff --git a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
deleted file mode 100644
index 3a4775ced..000000000
--- a/test/integration/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build !viper_yaml2
-// +build !viper_yaml2
-
-package yaml
-
-import yamlv3 "gopkg.in/yaml.v3"
-
-var yaml = struct {
- Marshal func(in interface{}) (out []byte, err error)
- Unmarshal func(in []byte, out interface{}) (err error)
-}{
- Marshal: yamlv3.Marshal,
- Unmarshal: yamlv3.Unmarshal,
-}
diff --git a/test/integration/vendor/github.com/spf13/viper/viper.go b/test/integration/vendor/github.com/spf13/viper/viper.go
index 5c12529b4..06610fc5a 100644
--- a/test/integration/vendor/github.com/spf13/viper/viper.go
+++ b/test/integration/vendor/github.com/spf13/viper/viper.go
@@ -421,13 +421,18 @@ var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props
// SupportedRemoteProviders are universally supported remote providers.
var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"}
+// OnConfigChange sets the event handler that is called when a config file changes.
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+
+// OnConfigChange sets the event handler that is called when a config file changes.
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
v.onConfigChange = run
}
+// WatchConfig starts watching a config file for changes.
func WatchConfig() { v.WatchConfig() }
+// WatchConfig starts watching a config file for changes.
func (v *Viper) WatchConfig() {
initWG := sync.WaitGroup{}
initWG.Add(1)
diff --git a/test/integration/vendor/github.com/subosito/gotenv/gotenv.go b/test/integration/vendor/github.com/subosito/gotenv/gotenv.go
index 7b1186e1f..dc013e1e0 100644
--- a/test/integration/vendor/github.com/subosito/gotenv/gotenv.go
+++ b/test/integration/vendor/github.com/subosito/gotenv/gotenv.go
@@ -3,6 +3,7 @@ package gotenv
import (
"bufio"
+ "bytes"
"fmt"
"io"
"os"
@@ -174,9 +175,36 @@ func Write(env Env, filename string) error {
return file.Sync()
}
+// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences).
+// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break).
+func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, bufio.ErrFinalToken
+ }
+
+ idx := bytes.IndexAny(data, "\r\n")
+ switch {
+ case atEOF && idx < 0:
+ return len(data), data, bufio.ErrFinalToken
+
+ case idx < 0:
+ return 0, nil, nil
+ }
+
+ // consume CR or LF
+ eol := idx + 1
+ // detect CRLF
+ if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' {
+ eol++
+ }
+
+ return eol, data[:idx], nil
+}
+
func strictParse(r io.Reader, override bool) (Env, error) {
env := make(Env)
scanner := bufio.NewScanner(r)
+ scanner.Split(splitLines)
firstLine := true
@@ -283,7 +311,6 @@ func parseLine(s string, env Env, override bool) error {
return varReplacement(s, hsq, env, override)
}
val = varRgx.ReplaceAllStringFunc(val, fv)
- val = parseVal(val, env, hdq, override)
}
env[key] = val
@@ -352,18 +379,3 @@ func checkFormat(s string, env Env) error {
return fmt.Errorf("line `%s` doesn't match format", s)
}
-
-func parseVal(val string, env Env, ignoreNewlines bool, override bool) string {
- if strings.Contains(val, "=") && !ignoreNewlines {
- kv := strings.Split(val, "\r")
-
- if len(kv) > 1 {
- val = kv[0]
- for _, l := range kv[1:] {
- _ = parseLine(l, env, override)
- }
- }
- }
-
- return val
-}
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/.lycheeignore b/test/integration/vendor/go.opentelemetry.io/otel/.lycheeignore
index 545d63452..32e481275 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/test/integration/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -1,3 +1,4 @@
http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/test/integration/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 9f130b8be..2bfe0a941 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/test/integration/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,139 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+ This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+ These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+ - `Float64Counter` replaces the `syncfloat64.Counter`
+ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+ - `Float64Histogram` replaces the `syncfloat64.Histogram`
+ - `Int64Counter` replaces the `syncint64.Counter`
+ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+ - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+ This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+ This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+ - The named `Callback` replaces the inline function parameter. (#3564)
+ - `Callback` is required to return an error. (#3576)
+ - `Callback` accepts the added `Observer` parameter.
+ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+ Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+ Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+ Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
## [1.11.2/0.34.0] 2022-12-05
### Added
@@ -58,7 +191,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Prevent duplicate Prometheus description, unit, and type. (#3469)
- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
-## Removed
+### Removed
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486)
@@ -2087,7 +2220,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...HEAD
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/Makefile b/test/integration/vendor/go.opentelemetry.io/otel/Makefile
index 68cdfef7d..befb040a7 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/Makefile
+++ b/test/integration/vendor/go.opentelemetry.io/otel/Makefile
@@ -208,11 +208,11 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
- @[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
- @[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- @$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
+ [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease
prerelease: | $(MULTIMOD)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/RELEASING.md b/test/integration/vendor/go.opentelemetry.io/otel/RELEASING.md
index 71e576254..77d56c936 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/test/integration/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -6,20 +6,25 @@ New versions of the [OpenTelemetry specification] mean new versions of the `semc
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
-2. Run the `make semconv-generate ...` target from this repository.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
-export TAG="v1.7.0" # Change to the release version you are generating.
+export TAG="v1.13.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
-git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG"
+git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG"
+docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
+**Note**, the generation code was changed to generate versions >= 1.13.
+To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
+
## Pre-Release
First, decide which module sets will be released and update their versions
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/handler.go b/test/integration/vendor/go.opentelemetry.io/otel/handler.go
index 36cf09f72..ecd363ab5 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/handler.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/handler.go
@@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel"
import (
"log"
"os"
- "sync"
+ "sync/atomic"
+ "unsafe"
)
var (
@@ -34,28 +35,26 @@ var (
)
type delegator struct {
- lock *sync.RWMutex
- eh ErrorHandler
+ delegate unsafe.Pointer
}
func (d *delegator) Handle(err error) {
- d.lock.RLock()
- defer d.lock.RUnlock()
- d.eh.Handle(err)
+ d.getDelegate().Handle(err)
+}
+
+func (d *delegator) getDelegate() ErrorHandler {
+ return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
}
// setDelegate sets the ErrorHandler delegate.
func (d *delegator) setDelegate(eh ErrorHandler) {
- d.lock.Lock()
- defer d.lock.Unlock()
- d.eh = eh
+ atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
}
func defaultErrorHandler() *delegator {
- return &delegator{
- lock: &sync.RWMutex{},
- eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)},
- }
+ d := &delegator{}
+ d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+ return d
}
// errLogger logs errors if no delegate is set, otherwise they are delegated.
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/test/integration/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index ccb325871..293c08961 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"os"
- "sync"
+ "sync/atomic"
+ "unsafe"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
@@ -27,37 +28,36 @@ import (
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
-var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
-var globalLoggerLock = &sync.RWMutex{}
+var globalLogger unsafe.Pointer
+
+func init() {
+ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
// SetLogger overrides the globalLogger with l.
//
// To see Info messages use a logger with `l.V(1).Enabled() == true`
// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
func SetLogger(l logr.Logger) {
- globalLoggerLock.Lock()
- defer globalLoggerLock.Unlock()
- globalLogger = l
+ atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+}
+
+func getLogger() logr.Logger {
+ return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less then 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.V(1).Info(msg, keysAndValues...)
+ getLogger().V(1).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.Error(err, msg, keysAndValues...)
+ getLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.V(5).Info(msg, keysAndValues...)
+ getLogger().V(5).Info(msg, keysAndValues...)
}
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index 7af46c61a..34a474891 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type (
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 7a897e969..6f7fd005b 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -22,7 +22,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type containerIDProvider func() (string, error)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 1c349247b..deebe363a 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
const (
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 3b4d0c14d..ac520dd86 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type osDescriptionProvider func() (string, error)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index 9a169f663..7eaddd34b 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type pidProvider func() int
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 327b8b416..201c17817 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -76,6 +76,7 @@ type TracerProvider struct {
mu sync.Mutex
namedTracer map[instrumentation.Scope]*tracer
spanProcessors atomic.Value
+ isShutdown bool
// These fields are not protected by the lock mu. They are assumed to be
// immutable after creation of the TracerProvider.
@@ -163,6 +164,9 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
p.mu.Lock()
defer p.mu.Unlock()
+ if p.isShutdown {
+ return
+ }
newSPS := spanProcessorStates{}
newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...)
newSPS = append(newSPS, newSpanProcessorState(sp))
@@ -173,6 +177,9 @@ func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
p.mu.Lock()
defer p.mu.Unlock()
+ if p.isShutdown {
+ return
+ }
old := p.spanProcessors.Load().(spanProcessorStates)
if len(old) == 0 {
return
@@ -227,13 +234,18 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error {
return nil
}
-// Shutdown shuts down the span processors in the order they were registered.
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
func (p *TracerProvider) Shutdown(ctx context.Context) error {
spss := p.spanProcessors.Load().(spanProcessorStates)
if len(spss) == 0 {
return nil
}
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.isShutdown = true
+
var retErr error
for _, sps := range spss {
select {
@@ -255,6 +267,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
}
}
}
+ p.spanProcessors.Store(spanProcessorStates{})
return retErr
}
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index a6dcf4b30..5ee9715d2 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -81,7 +81,7 @@ type traceIDRatioSampler struct {
func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
psc := trace.SpanContextFromContext(p.ParentContext)
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
+ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
if x < ts.traceIDUpperBound {
return SamplingResult{
Decision: RecordAndSample,
@@ -163,10 +163,10 @@ func NeverSample() Sampler {
// the root(Sampler) is used to make sampling decision. If the span has
// a parent, depending on whether the parent is remote and whether it
// is sampled, one of the following samplers will apply:
-// - remoteParentSampled(Sampler) (default: AlwaysOn)
-// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
-// - localParentSampled(Sampler) (default: AlwaysOn)
-// - localParentNotSampled(Sampler) (default: AlwaysOff)
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
return parentBased{
root: root,
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index b5d6f5441..5abb0b274 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 000000000..71a1f7748
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 000000000..9b8c559de
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 000000000..d5c4b5c13
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 000000000..add7987a2
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,1118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
+const (
+ // Array of brand name and version separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+ // The platform on which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in the
+ // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the
+ // values in the `browser.platform` attribute should capture the exact value that
+ // the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+ // A boolean that is true if the browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+ // Full user-agent string provided by the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
+ // (KHTML, '
+ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do not
+ // have a mechanism to retrieve brands and platform individually from the User-
+ // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent`
+ // API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+ // Preferred language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // Name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+ // The cloud account ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+ // The geographical region the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
+ // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
+ // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
+ // us/global-infrastructure/geographies/), [Google Cloud
+ // regions](https://cloud.google.com/about/locations), or [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+ // The cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
+ // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+ // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
+ // perguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+ // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
+ // aunch_types.html) for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+ // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
+ // t/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+ // The task definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+ // The revision for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // The ARN of an EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// Resources specific to Amazon Web Services.
+const (
+ // The name(s) of the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each write
+ // to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+ // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+ // The name(s) of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+ // The ARN(s) of the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
+ // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
+ // several log streams, so these ARNs necessarily identify both a log group and a
+ // log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// A container instance.
+const (
+ // Container name used by container runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+ // Container ID. Usually a UUID, as for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-
+ // identification). The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+ // The container runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+ // Name of the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+ // Container image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// The software deployment.
+const (
+ // Name of the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// The device on which the process represented by this resource is running.
+const (
+ // A unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values outlined
+ // below. This value is not an advertising identifier and MUST NOT be used as
+ // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
+ // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
+ // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
+ // Firebase Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on best
+ // practices and exact implementation details. Caution should be taken when
+ // storing personal data or anything which can identify a user. GDPR and data
+ // protection laws may apply, ensure you do your own due diligence.
+ DeviceIDKey = attribute.Key("device.id")
+ // The model identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version of the
+ // model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+ // The marketing name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of the
+ // device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+ // The name of the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+ // The name of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+ // general.md#source-code-attributes)
+ // span attributes).
+
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+
+ // * **Azure:** The full name `/`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the full ID
+ // at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+
+ // The exact value to use for `faas.id` depends on the cloud provider:
+
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+ // namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // aliases.html)
+ // with the resolved function version, as the same runtime instance may be
+ // invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+ // resource-names)
+ // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+ // us/rest/api/resources/resources/get-by-id) of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.We
+ // b/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function app can
+ // host multiple functions that would usually share
+ // a TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+ // The immutable version of the function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-
+ // var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+ // The execution environment ID as a string, that will be potentially reused for
+ // other invocations to the same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+ // The amount of memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// A host is defined as a general computing instance.
+const (
+ // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+ // provider. For non-containerized Linux systems, the `machine-id` located in
+ // `/etc/machine-id` or `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+ // Name of the host. On Unix systems, it may contain what the hostname command
+ // returns, or the fully qualified hostname, or another name specified by the
+ // user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+ // Type of host. For Cloud, this must be the machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+ // The CPU architecture the host system is running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+ // Name of the VM image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+ // VM image ID. For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+ // The version string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// A Kubernetes Cluster.
+const (
+ // The name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// A Kubernetes Node object.
+const (
+ // The name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+ // The UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// A Kubernetes Namespace.
+const (
+ // The name of the namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// A Kubernetes Pod object.
+const (
+ // The UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+ // The name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // The name of the Container from Pod specification, must be unique within a Pod.
+ // Container runtime usually uses different globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+ // Number of times the container was restarted. This attribute can be used to
+ // identify a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// A Kubernetes ReplicaSet object.
+const (
+ // The UID of the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+ // The name of the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// A Kubernetes Deployment object.
+const (
+ // The UID of the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+ // The name of the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// A Kubernetes StatefulSet object.
+const (
+ // The UID of the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+ // The name of the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// A Kubernetes DaemonSet object.
+const (
+ // The UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+ // The name of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// A Kubernetes Job object.
+const (
+ // The UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+ // The name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// A Kubernetes CronJob object.
+const (
+ // The UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+ // The name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// The operating system (OS) on which the process represented by this resource is running.
+const (
+ // The operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+ // Human readable (not intended to be parsed) OS version information, like e.g.
+ // reported by `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+ // Human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+ // The version string of the operating system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// An operating system process.
+const (
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+ // Parent Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+ // The name of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+ // The full path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+ // The command used to launch the process (i.e. the command name). On Linux based
+ // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
+ // can be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
+ // the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+ // The username of the user that owns the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// The single (language) runtime instance which is monitored.
+const (
+ // The name of the runtime of this process. For compiled native binaries, this
+ // SHOULD be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+ // The version of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+ // An additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// A service instance.
+const (
+ // Logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available, the
+ // value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+ // A namespace for `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace defined
+ // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
+ // length namespace string is assumed equal to unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that exist
+ // at the same time (e.g. instances of a horizontally scaled service). It is
+ // preferable for the ID to be persistent and stay the same for the lifetime of
+ // the service instance, however it is acceptable that the ID is ephemeral and
+ // changes during important lifetime events for the service (e.g. service
+ // restarts). If the service has no inherent unique ID that can be used as the
+ // value of this attribute it is recommended to generate a random Version 1 or
+ // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+ // The version string of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// The telemetry SDK used to capture data recorded by the instrumentation libraries.
+const (
+ // The name of the telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+ // The language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+ // The version string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+ // The version string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
+const (
+ // The name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+ // The version of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+ // Additional description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts.
+const (
+ // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+ // The version of the instrumentation scope - (`InstrumentationScope.Version` in
+ // OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts.
+const (
+ // Deprecated, use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+ // Deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 000000000..42fc525d1
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 000000000..01e5f072a
--- /dev/null
+++ b/test/integration/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,1892 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This document defines the shared attributes used to report a single exception associated with a span or log.
+const (
+ // The type of the exception (its fully-qualified class name, if applicable). The
+ // dynamic type of the exception should be preferred over the static type in
+ // languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+ // The exception message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+ // A stacktrace as a string in the natural representation for the language
+ // runtime. The representation is to be determined and documented by each language
+ // SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// This document defines attributes for Events represented using Log Records.
+const (
+ // The name identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+ // The domain identifies the business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+ // The full invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next`
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+ // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
+ // .md#id) uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+ // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
+ // d#source-1) identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
+ // service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+ // The [version of the CloudEvents specification](https://github.com/cloudevents/s
+ // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+ // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
+ // ec.md#type) contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+ // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
+ // md#subject) of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+ // Parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+ // An identifier for the database management system (DBMS) product being used. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+ // The connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+ // Username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+ // The fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+ // used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+ // This attribute is used to report the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target database
+ // (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called "schema
+ // name". In case there are multiple layers that could be considered for database
+ // name (e.g. Oracle instance name and schema name), the database name to be used
+ // is the more specific layer (e.g. Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+ // The database statement being executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not explicitly
+ // disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+ // The name of the operation being executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to attempt any
+ // client-side parsing of `db.statement` just to get this property, but it should
+ // be set if the operation name is provided by the library being instrumented. If
+ // the SQL statement has an ambiguous operation, or performs more than one
+ // operation, this value may be omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
+ // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// Call-level attributes for Cassandra
+const (
+ // The fetch size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+ // The consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-
+ // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+ // The name of the primary table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra rather
+ // than sql. It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+ // Whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+ // The number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+ // The ID of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+ // The data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Redis
+const (
+ // The index of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To be used
+ // instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default database
+ // (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+ // The collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+ // The name of the primary table that the operation is acting upon, including the
+ // database name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts.
+const (
+ // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OtelStatusCodeKey = attribute.Key("otel.status_code")
+ // Description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+ // Type of the trigger which caused this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+ // The execution ID of the current function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+ // The name of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
+ // DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+ // Describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+ // A string containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+ // The document name/table subjected to the operation. For example, in Cloud
+ // Storage or S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // A string containing the function invocation time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+ // A string containing the schedule period as [Cron Expression](https://docs.oracl
+ // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // A boolean that is true if the serverless function is executed for the first
+ // time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // The name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+ // The cloud provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
+ // function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+ // The cloud region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or
+ // GCP, the region in which a function is hosted is essential to uniquely identify
+ // the function and also part of its endpoint. Since it's part of the endpoint
+ // being called, the region is always known to clients. In these cases,
+ // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the
+ // client or not required for identifying the invoked function, setting
+ // `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// These attributes may be used for any network related operation.
+const (
+ // Transport protocol used. See note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+ // Application layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+ // Version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.app.protocol.version` refers to the version of the protocol used and
+ // might be different from the protocol client's version. If the HTTP client used
+ // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should
+ // be set to `1.1`.
+ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+ // Remote socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from `net.peer.name`
+ // and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+ // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication, [etc](https://man7.org/linux/man-
+ // pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+ // Remote socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+ // Protocol [address family](https://man7.org/linux/man-
+ // pages/man7/address_families.7.html) which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of
+ // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry
+ // SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+ // Logical remote hostname, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra
+ // DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+ // Logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+ // Logical local hostname or similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+ // Logical local port number, preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+ // Local socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+ // Local socket port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+ // The internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+ // This describes more details regarding the connection.type. It may be the type
+ // of cell technology connection, but it could be used for describing details
+ // about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+ // The name of the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+ // The mobile carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+ // The mobile carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+ // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// Operations that access some remote service.
+const (
+ // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
+ // the remote service. SHOULD be equal to the actual `service.name` resource
+ // attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// These attributes may be used for any operation with an authenticated and/or authorized enduser.
+const (
+ // Username or client_id extracted from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
+ // inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+ // Actual/assumed role the client is making the request under extracted from token
+ // or application security context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+ // Scopes or granted authorities the client currently possesses extracted from
+ // token or application security context. The value would come from the scope
+ // associated with an [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
+ // in a [SAML 2.0 Assertion](http://docs.oasis-
+ // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+ // Current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// These attributes allow to report this unit of code and therefore to provide more context about the span.
+const (
+ // The method or function name, or equivalent (usually rightmost part of the code
+ // unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+ // The "namespace" within which `code.function` is defined. Usually the qualified
+ // class or module name, such that `code.namespace` + some separator +
+ // `code.function` form a unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+ // The source code file name that identifies the code unit as uniquely as possible
+ // (preferably an absolute file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+ // The line number in `code.filepath` best representing the operation. It SHOULD
+ // point within the code unit named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+ // The column number in `code.filepath` best representing the operation. It SHOULD
+ // point within the code unit named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+ // HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+ // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+ // Kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+ // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+ // Value of the [HTTP User-Agent](https://www.rfc-
+ // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-
+ // length) header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+ // The size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-
+ // length) header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// Semantic Convention for HTTP Client
+const (
+ // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
+ // Usually the fragment is not transmitted over HTTP, but if it is known, it
+ // should be included nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the attribute's
+ // value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+ // The ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets resent
+ // by the client, regardless of what was the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues, or
+ // any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// Semantic Convention for HTTP Server
+const (
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+ // The full request target as passed in a HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+ // The matched route (path template in the format used by the respective server
+ // framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and the URI
+ // path can NOT substitute it.
+ HTTPRouteKey = attribute.Key("http.route")
+ // The IP address of the original client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-
+ // US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which would
+ // identify the network-level peer, which may be a proxy.
+
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr`, is available even if that other
+ // source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // The keys in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+ // The JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+ // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+ // The JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+ // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+ // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+ // The value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+ // The value of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+ // ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+ // The value of the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+ // The value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+ // The value of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+ // The value of the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// DynamoDB.CreateTable
+const (
+ // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+ // field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+ // number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+ // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+	// The value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+	// The number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+ // The value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+ // The value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+ // The value of the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+ // The value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+	// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span.
+const (
+ // The name of the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+ // The type of the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+ // The GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// Semantic convention describing per-message attributes populated on messaging spans or links.
+const (
+ // A value used by the messaging system as an identifier for the message,
+ // represented as a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+ // The [conversation ID](#conversations) identifying the conversation to which the
+ // message belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+ // The (uncompressed) size of the message payload in bytes. Also use this
+ // attribute if it is unknown whether the compressed or uncompressed payload size
+ // is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+ // The compressed size of the message payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// Semantic convention for attributes that describe messaging destination on broker
+const (
+ // The message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+ // The kind of message destination
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+ // Low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+ // A boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+ // A boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // The message source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or other
+ // entity within the broker. If
+ // the broker does not have such notion, the source name SHOULD uniquely identify
+ // the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+ // The kind of message source
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+ // Low cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would be a
+ // source name involving a user name or product id. Although the source name in
+ // this case is of high cardinality, the underlying template is of low cardinality
+ // and can be effectively used for grouping and aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+ // A boolean that is true if the message source is temporary and might not exist
+ // anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+ // A boolean that is true if the message source is anonymous (could be unnamed or
+ // have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+ // A message received from a queue
+ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+ // A message received from a topic
+ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// This document defines general attributes used in messaging systems.
+const (
+ // A string identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+ // A string identifying the kind of messaging operation as defined in the
+ // [Operation names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+ // The number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an operation on
+ // a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans
+ // that operate with a single message. When a messaging client library supports
+ // both batch and single-message API for the same operation, instrumentations
+ // SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use
+ // it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+ // The identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both are
+ // present, or only `messaging.kafka.consumer.group`. For brokers, such as
+ // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// Attributes for RabbitMQ
+const (
+ // RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+ // Message keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+ // Client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+ // Partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+ // Partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+ // The offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the
+ // value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// Attributes for Apache RocketMQ
+const (
+ // Namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+ // The unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+ // The timestamp in milliseconds that the delay message is expected to be
+ // delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay and delay
+ // time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+ // The delay time level for delay message, which determines the message delay
+ // time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay and
+ // delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+ // It is essential for FIFO message. Messages that belong to the same message
+ // group are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+ // Type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+ // Key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+ // Model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// This document defines semantic conventions for remote procedure calls.
+const (
+ // A string identifying the remoting system. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+ // The full (logical) name of the service being called, including its package
+ // name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+ // The name of the (logical) method being called, must be equal to the $method
+ // part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the latter
+ // (e.g., method actually executing the call on the server side, RPC client stub
+ // method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// Tech-specific attributes for gRPC.
+const (
+ // The [numeric status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
+ // request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
+ // 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default version
+ // (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+ // `id` property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be cast to
+ // string for simplicity. Use empty string in case of `null` value. Omit entirely
+ // if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/version.go b/test/integration/vendor/go.opentelemetry.io/otel/version.go
index 00b79bcc2..bda8f7cbf 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/version.go
+++ b/test/integration/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.11.2"
+ return "1.12.0"
}
diff --git a/test/integration/vendor/go.opentelemetry.io/otel/versions.yaml b/test/integration/vendor/go.opentelemetry.io/otel/versions.yaml
index 611879def..66f456222 100644
--- a/test/integration/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/test/integration/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,7 +14,7 @@
module-sets:
stable-v1:
- version: v1.11.2
+ version: v1.12.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@@ -34,7 +34,7 @@ module-sets:
- go.opentelemetry.io/otel/trace
- go.opentelemetry.io/otel/sdk
experimental-metrics:
- version: v0.34.0
+ version: v0.35.0
modules:
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
diff --git a/test/integration/vendor/modules.txt b/test/integration/vendor/modules.txt
index a3de02dee..35b5098e4 100644
--- a/test/integration/vendor/modules.txt
+++ b/test/integration/vendor/modules.txt
@@ -1,11 +1,9 @@
-# cloud.google.com/go/compute/metadata v0.2.3
-## explicit; go 1.19
# github.com/AlecAivazis/survey/v2 v2.3.6
## explicit; go 1.13
github.com/AlecAivazis/survey/v2
github.com/AlecAivazis/survey/v2/core
github.com/AlecAivazis/survey/v2/terminal
-# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
+# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
@@ -104,7 +102,7 @@ github.com/docker/buildx/util/platformutil
github.com/docker/buildx/util/progress
github.com/docker/buildx/util/resolver
github.com/docker/buildx/util/waitmap
-# github.com/docker/cli v20.10.21+incompatible => github.com/docker/cli v20.10.3-0.20221013132413-1d6c6e2367e2+incompatible
+# github.com/docker/cli v20.10.23+incompatible => github.com/docker/cli v20.10.3-0.20221013132413-1d6c6e2367e2+incompatible
## explicit
github.com/docker/cli/cli
github.com/docker/cli/cli-plugins/manager
@@ -167,7 +165,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
github.com/docker/distribution/uuid
-# github.com/docker/docker v20.10.21+incompatible => github.com/docker/docker v20.10.3-0.20221013203545-33ab36d6b304+incompatible
+# github.com/docker/docker v23.0.0+incompatible => github.com/docker/docker v20.10.3-0.20221013203545-33ab36d6b304+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -248,6 +246,8 @@ github.com/go-resty/resty/v2
# github.com/gofrs/flock v0.8.1
## explicit
github.com/gofrs/flock
+# github.com/gofrs/uuid v4.4.0+incompatible
+## explicit
# github.com/gogo/googleapis v1.4.1
## explicit; go 1.12
github.com/gogo/googleapis/google/rpc
@@ -531,12 +531,13 @@ github.com/orcaman/concurrent-map
# github.com/pelletier/go-toml v1.9.5
## explicit; go 1.12
github.com/pelletier/go-toml
-# github.com/pelletier/go-toml/v2 v2.0.5
+# github.com/pelletier/go-toml/v2 v2.0.6
## explicit; go 1.16
github.com/pelletier/go-toml/v2
-github.com/pelletier/go-toml/v2/internal/ast
+github.com/pelletier/go-toml/v2/internal/characters
github.com/pelletier/go-toml/v2/internal/danger
github.com/pelletier/go-toml/v2/internal/tracker
+github.com/pelletier/go-toml/v2/unstable
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
@@ -591,7 +592,7 @@ github.com/shirou/gopsutil/v3/process
# github.com/sirupsen/logrus v1.9.0
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/spf13/afero v1.9.2
+# github.com/spf13/afero v1.9.3
## explicit; go 1.16
github.com/spf13/afero
github.com/spf13/afero/internal/common
@@ -627,7 +628,7 @@ github.com/stretchr/objx
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
-# github.com/subosito/gotenv v1.4.1
+# github.com/subosito/gotenv v1.4.2
## explicit; go 1.18
github.com/subosito/gotenv
# github.com/testcontainers/testcontainers-go v0.17.0
@@ -695,7 +696,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0
## explicit; go 1.18
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
-# go.opentelemetry.io/otel v1.11.2
+# go.opentelemetry.io/otel v1.12.0
## explicit; go 1.18
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -708,6 +709,7 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.12.0
+go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.7.0
# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
## explicit; go 1.16
@@ -734,14 +736,14 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64
go.opentelemetry.io/otel/metric/instrument/syncint64
go.opentelemetry.io/otel/metric/internal/global
go.opentelemetry.io/otel/metric/unit
-# go.opentelemetry.io/otel/sdk v1.11.1
+# go.opentelemetry.io/otel/sdk v1.12.0
## explicit; go 1.18
go.opentelemetry.io/otel/sdk/instrumentation
go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.11.2
+# go.opentelemetry.io/otel/trace v1.12.0
## explicit; go 1.18
go.opentelemetry.io/otel/trace
# go.opentelemetry.io/proto/otlp v0.12.0