From 616aa8ff0a7c8a06eaf19bb0da3e349ceccbde10 Mon Sep 17 00:00:00 2001 From: Nilekh Chaudhari <1626598+nilekhc@users.noreply.github.com> Date: Fri, 21 Jul 2023 01:29:18 -0700 Subject: [PATCH] feat: implements external data response cache (#2823) Signed-off-by: Nilekh Chaudhari <1626598+nilekhc@users.noreply.github.com> --- go.mod | 45 +- go.sum | 96 +- main.go | 49 +- .../go/compute/internal/version.go | 2 +- .../apiv3/v2/alert_policy_client.go | 2 +- .../go/monitoring/apiv3/v2/doc.go | 10 +- .../monitoring/apiv3/v2/gapic_metadata.json | 29 + .../go/monitoring/apiv3/v2/group_client.go | 2 +- .../go/monitoring/apiv3/v2/metric_client.go | 2 +- .../apiv3/v2/monitoringpb/alert.pb.go | 2 +- .../apiv3/v2/monitoringpb/alert_service.pb.go | 2 +- .../apiv3/v2/monitoringpb/common.pb.go | 2 +- .../v2/monitoringpb/dropped_labels.pb.go | 2 +- .../apiv3/v2/monitoringpb/group.pb.go | 2 +- .../apiv3/v2/monitoringpb/group_service.pb.go | 2 +- .../apiv3/v2/monitoringpb/metric.pb.go | 2 +- .../v2/monitoringpb/metric_service.pb.go | 2 +- .../v2/monitoringpb/mutation_record.pb.go | 2 +- .../apiv3/v2/monitoringpb/notification.pb.go | 2 +- .../monitoringpb/notification_service.pb.go | 2 +- .../apiv3/v2/monitoringpb/query_service.pb.go | 2 +- .../apiv3/v2/monitoringpb/service.pb.go | 2 +- .../v2/monitoringpb/service_service.pb.go | 2 +- .../apiv3/v2/monitoringpb/snooze.pb.go | 314 ++ .../v2/monitoringpb/snooze_service.pb.go | 867 +++ .../apiv3/v2/monitoringpb/span_context.pb.go | 2 +- .../apiv3/v2/monitoringpb/uptime.pb.go | 2 +- .../v2/monitoringpb/uptime_service.pb.go | 2 +- .../apiv3/v2/notification_channel_client.go | 2 +- .../go/monitoring/apiv3/v2/query_client.go | 2 +- .../apiv3/v2/service_monitoring_client.go | 2 +- .../go/monitoring/apiv3/v2/snooze_client.go | 400 ++ .../apiv3/v2/uptime_check_client.go | 2 +- .../go/monitoring/apiv3/v2/version.go | 2 +- .../go/monitoring/internal/version.go | 2 +- vendor/cloud.google.com/go/trace/apiv2/doc.go | 2 +- .../go/trace/internal/version.go | 2 +- vendor/github.com/go-ini/ini/.editorconfig | 12 + vendor/github.com/go-ini/ini/.gitignore | 7 + vendor/github.com/go-ini/ini/.golangci.yml | 27 + vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/Makefile | 15 + vendor/github.com/go-ini/ini/README.md | 43 + vendor/github.com/go-ini/ini/codecov.yml | 16 + vendor/github.com/go-ini/ini/data_source.go | 76 + vendor/github.com/go-ini/ini/deprecated.go | 22 + vendor/github.com/go-ini/ini/error.go | 49 + vendor/github.com/go-ini/ini/file.go | 541 ++ vendor/github.com/go-ini/ini/helper.go | 24 + vendor/github.com/go-ini/ini/ini.go | 176 + vendor/github.com/go-ini/ini/key.go | 837 +++ vendor/github.com/go-ini/ini/parser.go | 520 ++ vendor/github.com/go-ini/ini/section.go | 256 + vendor/github.com/go-ini/ini/struct.go | 747 +++ .../client/client.go | 37 +- .../client/util/util.go | 26 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 7 + .../googleapis/gax-go/v2/apierror/apierror.go | 11 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- .../inconshreveable/mousetrap/trap_others.go | 1 + .../inconshreveable/mousetrap/trap_windows.go | 88 +- .../mousetrap/trap_windows_1.4.go | 46 - .../pkg/client/drivers/rego/args.go | 8 + .../pkg/client/drivers/rego/builtin.go | 80 +- .../pkg/client/drivers/rego/driver.go | 3 + .../constraint/pkg/externaldata/cache.go | 70 + .../open-policy-agent/opa/ast/builtins.go | 27 +- .../open-policy-agent/opa/ast/check.go | 2 +- .../open-policy-agent/opa/ast/compile.go | 36 +- 
.../open-policy-agent/opa/ast/env.go | 110 + .../open-policy-agent/opa/ast/index.go | 19 +- .../open-policy-agent/opa/ast/parser_ext.go | 93 +- .../open-policy-agent/opa/ast/term.go | 36 +- .../open-policy-agent/opa/bundle/bundle.go | 29 +- .../open-policy-agent/opa/bundle/file.go | 115 +- .../open-policy-agent/opa/bundle/filefs.go | 24 +- .../opa/capabilities/v0.52.0.json | 4615 ++++++++++++++++ .../opa/capabilities/v0.53.0.json | 4640 +++++++++++++++++ .../opa/capabilities/v0.53.1.json | 4640 +++++++++++++++++ .../opa/capabilities/v0.54.0.json | 4640 +++++++++++++++++ .../open-policy-agent/opa/config/config.go | 257 + .../open-policy-agent/opa/format/format.go | 16 +- .../open-policy-agent/opa/hooks/hooks.go | 77 + .../internal/compiler/wasm/opa/callgraph.csv | 80 +- .../opa/internal/compiler/wasm/opa/opa.wasm | Bin 423898 -> 431451 bytes .../opa/internal/compiler/wasm/wasm.go | 2 +- .../opa/internal/config/config.go | 168 + .../opa/internal/errors/join.go | 53 + .../opa/internal/errors/join_go1.20.go | 7 + .../opa/internal/gojsonschema/validation.go | 4 +- .../opa/internal/planner/planner.go | 61 +- .../opa/internal/planner/rules.go | 4 + .../opa/internal/providers/aws/ecr.go | 148 + .../opa/internal/providers/aws/signing_v4.go | 42 +- .../opa/internal/providers/aws/util.go | 45 + .../opa/internal/runtime/init/init.go | 213 + .../opa/internal/strvals/doc.go | 33 + .../opa/internal/strvals/parser.go | 431 ++ .../opa/loader/extension/extension.go | 40 + .../open-policy-agent/opa/loader/loader.go | 93 +- .../open-policy-agent/opa/logging/logging.go | 233 + .../open-policy-agent/opa/plugins/plugins.go | 983 ++++ .../opa/plugins/rest/auth.go | 693 +++ .../open-policy-agent/opa/plugins/rest/aws.go | 519 ++ .../opa/plugins/rest/azure.go | 157 + .../open-policy-agent/opa/plugins/rest/gcp.go | 173 + .../opa/plugins/rest/rest.go | 368 ++ .../open-policy-agent/opa/rego/plugins.go | 43 + .../open-policy-agent/opa/rego/rego.go | 291 +- .../open-policy-agent/opa/topdown/builtins.go | 6 +- .../opa/topdown/builtins/builtins.go | 6 +- .../opa/topdown/cache/cache.go | 15 +- .../open-policy-agent/opa/topdown/crypto.go | 100 +- .../open-policy-agent/opa/topdown/errors.go | 10 + .../open-policy-agent/opa/topdown/eval.go | 6 +- .../open-policy-agent/opa/topdown/http.go | 97 +- .../open-policy-agent/opa/topdown/object.go | 11 +- .../opa/topdown/providers.go | 2 +- .../open-policy-agent/opa/topdown/query.go | 8 + .../open-policy-agent/opa/topdown/regex.go | 2 +- .../open-policy-agent/opa/topdown/subset.go | 4 +- .../open-policy-agent/opa/topdown/time.go | 28 +- .../open-policy-agent/opa/topdown/tokens.go | 2 + .../open-policy-agent/opa/util/json.go | 27 +- .../open-policy-agent/opa/version/version.go | 24 +- .../opa/version/version_go1.18.go | 25 - .../client_golang/prometheus/desc.go | 4 +- .../client_golang/prometheus/histogram.go | 10 +- .../client_golang/prometheus/promhttp/http.go | 19 +- .../client_golang/prometheus/vec.go | 35 +- .../prometheus/procfs/Makefile.common | 16 +- vendor/github.com/prometheus/procfs/fs.go | 9 +- .../prometheus/procfs/fs_statfs_notype.go | 23 + .../prometheus/procfs/fs_statfs_type.go | 33 + .../prometheus/procfs/internal/util/parse.go | 15 + .../prometheus/procfs/mountstats.go | 6 +- .../prometheus/procfs/net_conntrackstat.go | 88 +- .../prometheus/procfs/net_softnet.go | 5 + .../prometheus/procfs/net_wireless.go | 182 + .../github.com/prometheus/procfs/netstat.go | 25 +- vendor/github.com/prometheus/procfs/proc.go | 22 +- .../github.com/prometheus/procfs/proc_stat.go | 6 +- 
.../prometheus/procfs/proc_status.go | 32 + vendor/github.com/prometheus/procfs/thread.go | 9 +- vendor/github.com/sirupsen/logrus/README.md | 8 +- vendor/github.com/sirupsen/logrus/writer.go | 34 +- vendor/github.com/spf13/cobra/.golangci.yml | 2 +- vendor/github.com/spf13/cobra/Makefile | 8 +- vendor/github.com/spf13/cobra/README.md | 4 +- vendor/github.com/spf13/cobra/active_help.go | 2 +- vendor/github.com/spf13/cobra/args.go | 4 +- .../spf13/cobra/bash_completions.go | 4 +- .../spf13/cobra/bash_completionsV2.go | 71 +- vendor/github.com/spf13/cobra/cobra.go | 6 +- vendor/github.com/spf13/cobra/command.go | 54 +- .../github.com/spf13/cobra/command_notwin.go | 2 +- vendor/github.com/spf13/cobra/command_win.go | 2 +- vendor/github.com/spf13/cobra/completions.go | 13 +- .../spf13/cobra/fish_completions.go | 78 +- vendor/github.com/spf13/cobra/flag_groups.go | 2 +- .../spf13/cobra/powershell_completions.go | 27 +- .../spf13/cobra/projects_using_cobra.md | 6 +- .../spf13/cobra/shell_completions.go | 2 +- .../spf13/cobra/shell_completions.md | 30 +- vendor/github.com/spf13/cobra/user_guide.md | 37 +- .../github.com/spf13/cobra/zsh_completions.go | 17 +- .../testify/assert/assertion_compare.go | 36 +- .../testify/assert/assertion_format.go | 216 +- .../testify/assert/assertion_forward.go | 432 +- .../testify/assert/assertion_order.go | 24 +- .../stretchr/testify/assert/assertions.go | 306 +- .../github.com/stretchr/testify/assert/doc.go | 43 +- .../testify/assert/http_assertions.go | 12 +- .../stretchr/testify/require/doc.go | 23 +- .../stretchr/testify/require/require.go | 444 +- .../testify/require/require_forward.go | 432 +- .../yashtewari/glob-intersection/glob.go | 6 +- .../yashtewari/glob-intersection/non_empty.go | 17 +- vendor/golang.org/x/net/http2/server.go | 9 +- vendor/golang.org/x/net/http2/transport.go | 21 +- vendor/golang.org/x/net/http2/writesched.go | 3 +- .../x/net/http2/writesched_roundrobin.go | 119 + vendor/golang.org/x/oauth2/README.md | 12 +- vendor/golang.org/x/oauth2/google/default.go | 28 +- vendor/golang.org/x/oauth2/google/doc.go | 65 +- vendor/golang.org/x/oauth2/google/google.go | 15 +- .../externalaccount/basecredentials.go | 32 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/token.go | 14 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 6 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 30 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 17 +- .../x/sys/unix/zerrors_linux_sparc64.go | 48 + .../golang.org/x/sys/unix/zsyscall_linux.go | 14 + .../x/sys/unix/zsyscall_openbsd_386.go | 22 + .../x/sys/unix/zsyscall_openbsd_386.s | 10 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 10 + .../x/sys/unix/zsyscall_openbsd_arm.go | 22 + .../x/sys/unix/zsyscall_openbsd_arm.s | 10 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 22 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 10 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 22 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 10 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 22 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 12 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 22 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 10 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 46 + .../x/sys/windows/syscall_windows.go | 13 +- .../x/sys/windows/zsyscall_windows.go | 8 +- .../cert/default_cert.go | 0 .../cert/enterprise_cert.go | 4 +- .../cert/secureconnect_cert.go | 0 .../google.golang.org/api/internal/creds.go | 76 +- 
.../internal/dca => internal}/dca.go | 13 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/transport/grpc/dial.go | 9 +- .../transport/http/configure_http2_go116.go | 26 - .../http/configure_http2_not_go116.go | 17 - .../api/transport/http/dial.go | 26 +- .../googleapis/api/annotations/client.pb.go | 381 +- .../api/annotations/field_behavior.pb.go | 20 +- .../googleapis/api/annotations/http.pb.go | 17 +- .../googleapis/api/annotations/resource.pb.go | 66 +- .../googleapis/api/annotations/routing.pb.go | 62 +- .../api/distribution/distribution.pb.go | 17 +- .../googleapis/api/httpbody/httpbody.pb.go | 4 +- .../genproto/googleapis/api/label/label.pb.go | 4 +- .../googleapis/api/launch_stage.pb.go | 4 +- .../googleapis/api/metric/metric.pb.go | 20 +- .../api/monitoredres/monitored_resource.pb.go | 45 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 4 - .../grpc/attributes/attributes.go | 29 + .../grpc/balancer/balancer.go | 2 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpc/balancer_conn_wrappers.go | 486 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/call.go | 5 + vendor/google.golang.org/grpc/clientconn.go | 622 ++- .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/handshaker/service/service.go | 18 + .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 30 + .../grpc/health/grpc_health_v1/health.pb.go | 2 +- vendor/google.golang.org/grpc/idle.go | 287 + .../grpc/internal/binarylog/binarylog.go | 3 + .../grpc/internal/binarylog/method_logger.go | 9 + .../grpc/internal/buffer/unbounded.go | 26 +- .../grpc/internal/envconfig/envconfig.go | 4 + .../grpc/internal/envconfig/observability.go | 6 + .../grpc/internal/envconfig/xds.go | 21 +- .../grpc/internal/grpcrand/grpcrand.go | 14 + .../internal/grpcsync/callback_serializer.go | 119 + .../grpc/internal/internal.go | 24 + .../grpc/internal/serviceconfig/duration.go | 130 + .../grpc/internal/transport/controlbuf.go | 17 +- .../grpc/internal/transport/handler_server.go | 11 +- .../grpc/internal/transport/http2_client.go | 27 +- .../grpc/internal/transport/http2_server.go | 63 +- .../grpc/internal/transport/http_util.go | 2 - .../grpc/internal/transport/logging.go | 40 + .../grpc/internal/transport/transport.go | 2 +- .../google.golang.org/grpc/picker_wrapper.go | 38 +- vendor/google.golang.org/grpc/pickfirst.go | 52 +- .../grpc/resolver/resolver.go | 16 +- .../grpc/resolver_conn_wrapper.go | 229 +- vendor/google.golang.org/grpc/server.go | 58 +- .../google.golang.org/grpc/service_config.go | 75 +- .../google.golang.org/grpc/status/status.go | 30 +- vendor/google.golang.org/grpc/stream.go | 29 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 64 +- 277 files changed, 34354 insertions(+), 3224 deletions(-) create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go create mode 100644 vendor/github.com/go-ini/ini/.editorconfig create mode 100644 vendor/github.com/go-ini/ini/.gitignore create mode 100644 vendor/github.com/go-ini/ini/.golangci.yml create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/Makefile create mode 100644 
vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/codecov.yml create mode 100644 vendor/github.com/go-ini/ini/data_source.go create mode 100644 vendor/github.com/go-ini/ini/deprecated.go create mode 100644 vendor/github.com/go-ini/ini/error.go create mode 100644 vendor/github.com/go-ini/ini/file.go create mode 100644 vendor/github.com/go-ini/ini/helper.go create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/go-ini/ini/struct.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.52.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.53.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.53.1.json create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.54.0.json create mode 100644 vendor/github.com/open-policy-agent/opa/config/config.go create mode 100644 vendor/github.com/open-policy-agent/opa/hooks/hooks.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/config/config.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/errors/join.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go create mode 100644 vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go create mode 100644 vendor/github.com/open-policy-agent/opa/loader/extension/extension.go create mode 100644 vendor/github.com/open-policy-agent/opa/logging/logging.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/plugins.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go create mode 100644 vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go create mode 100644 vendor/github.com/open-policy-agent/opa/rego/plugins.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_notype.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_type.go create mode 100644 vendor/github.com/prometheus/procfs/net_wireless.go create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go rename vendor/google.golang.org/api/{transport => internal}/cert/default_cert.go (100%) rename vendor/google.golang.org/api/{transport => internal}/cert/enterprise_cert.go (93%) rename vendor/google.golang.org/api/{transport => internal}/cert/secureconnect_cert.go (100%) rename vendor/google.golang.org/api/{transport/internal/dca => internal}/dca.go (92%) delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_go116.go delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go create mode 100644 vendor/google.golang.org/grpc/idle.go 
create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go diff --git a/go.mod b/go.mod index 58d7145cbef..b002b6a6390 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ go 1.20 replace sigs.k8s.io/controller-runtime => ./third_party/sigs.k8s.io/controller-runtime require ( - cloud.google.com/go/trace v1.8.0 + cloud.google.com/go/trace v1.9.0 contrib.go.opencensus.io/exporter/ocagent v0.7.0 contrib.go.opencensus.io/exporter/prometheus v0.4.2 contrib.go.opencensus.io/exporter/stackdriver v0.13.14 @@ -19,16 +19,16 @@ require ( github.com/google/uuid v1.3.0 github.com/onsi/gomega v1.27.7 github.com/open-policy-agent/cert-controller v0.8.0 - github.com/open-policy-agent/frameworks/constraint v0.0.0-20230606213221-6ccacf85c2c5 + github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.15.1 - github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.4 + github.com/prometheus/client_golang v1.16.0 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.2 go.opencensus.io v0.24.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/zap v1.24.0 - golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.5.0 + golang.org/x/net v0.11.0 + golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.2.0 golang.org/x/time v0.3.0 gopkg.in/yaml.v2 v2.4.0 @@ -46,9 +46,9 @@ require ( ) require ( - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/monitoring v1.8.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect @@ -75,6 +75,7 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -89,12 +90,12 @@ require ( github.com/google/cel-go v0.12.6 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -109,23 +110,23 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/open-policy-agent/opa v0.51.0 // indirect + github.com/open-policy-agent/opa v0.54.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect 
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/prometheus/prometheus v0.35.0 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/yashtewari/glob-intersection v0.1.0 // indirect + github.com/yashtewari/glob-intersection v0.2.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect @@ -138,14 +139,14 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect - google.golang.org/api v0.108.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.1 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/apiserver v0.27.2 // indirect diff --git a/go.sum b/go.sum index 93501988ee1..c64f31b8359 100644 --- a/go.sum +++ b/go.sum @@ -29,7 +29,7 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,16 +39,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod 
h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= -cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -58,8 +58,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= contrib.go.opencensus.io/exporter/ocagent v0.7.0 h1:BEfdCTXfMV30tLZD8c9n64V/tIZX5+9sXiuFLnrr1k8= contrib.go.opencensus.io/exporter/ocagent v0.7.0/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= @@ -468,6 +468,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -684,15 +686,15 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -773,8 +775,8 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= @@ -960,10 +962,10 @@ github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/open-policy-agent/cert-controller v0.8.0 h1:pao3WCLsKGz5dSWSlNUFrNFQdXtVTQ3lVDgk2IelH34= github.com/open-policy-agent/cert-controller v0.8.0/go.mod h1:alotCQRwX4M6VEwEgO53FB6nGLSlvah6L0pWxSRslIk= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20230606213221-6ccacf85c2c5 h1:ijnbL4tF6wIxIUPV8GPGI3hak8nlHusDkOQbHrpg0w0= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20230606213221-6ccacf85c2c5/go.mod h1:ENmaVhZWCR7pIhdtP3b7BVGBqoJnu+kEEJI+moDJTJE= -github.com/open-policy-agent/opa v0.51.0 h1:2hS5xhos8HtkN+mgpqMhNJSFtn/1n/h3wh+AeTPJg6Q= 
-github.com/open-policy-agent/opa v0.51.0/go.mod h1:OjmwLfXdeR7skSxrt8Yd3ScXTqPxyJn7GeTRJrcEerU= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f h1:dJDnp6A6LBrU/hbve5NzZNV3OzPYXdD0SJUn+xAPj+I= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f/go.mod h1:54/KzLMvA5ndBVpm7B1OjLeV0cUtTLTz2bZ2OtydLpU= +github.com/open-policy-agent/opa v0.54.0 h1:mGEsK+R5ZTMV8fzzbNzmYDGbTmY30wmRCIHmtm2VqWs= +github.com/open-policy-agent/opa v0.54.0/go.mod h1:d8I8jWygKGi4+T4H07qrbeCdH1ITLsEfT0M+bsvxWw0= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1029,8 +1031,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1070,8 +1072,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/prometheus v0.35.0 h1:N93oX6BrJ2iP3UuE2Uz4Lt+5BkUpaFer3L9CbADzesc= github.com/prometheus/prometheus v0.35.0/go.mod h1:7HaLx5kEPKJ0GDgbODG0fZgXbQ8K/XjZNJXQmbmgQlY= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= @@ -1111,8 +1113,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1129,8 +1131,8 @@ github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKv github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1159,8 +1161,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1202,8 +1204,8 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= -github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/youmark/pkcs8 
v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1338,7 +1340,7 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1447,8 +1449,8 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1467,8 +1469,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1611,15 +1613,15 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1629,8 +1631,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1760,8 +1762,8 @@ google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.108.0 h1:WVBc/faN0DkKtR43Q/7+tPny9ZoLZdIiAyG5Q9vFClg= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1844,8 +1846,8 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1880,8 +1882,8 @@ google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/main.go b/main.go index 3474139b8d7..7669e5aec66 100644 --- a/main.go +++ b/main.go @@ -16,6 +16,7 @@ limitations under the License. package main import ( + "context" "crypto/tls" "crypto/x509" "errors" @@ -93,22 +94,23 @@ var ( ) var ( - logFile = flag.String("log-file", "", "Log to file, if specified. Default is to log to stderr.") - logLevel = flag.String("log-level", "INFO", "Minimum log level. For example, DEBUG, INFO, WARNING, ERROR. Defaulted to INFO if unspecified.") - logLevelKey = flag.String("log-level-key", "level", "JSON key for the log level field, defaults to `level`") - logLevelEncoder = flag.String("log-level-encoder", "lower", "Encoder for the value of the log level field. Valid values: [`lower`, `capital`, `color`, `capitalcolor`], default: `lower`") - healthAddr = flag.String("health-addr", ":9090", "The address to which the health endpoint binds.") - metricsAddr = flag.String("metrics-addr", "0", "The address the metric endpoint binds to.") - port = flag.Int("port", 443, "port for the server. defaulted to 443 if unspecified ") - host = flag.String("host", "", "the host address the webhook server listens on. 
defaults to all addresses.") - certDir = flag.String("cert-dir", "/certs", "The directory where certs are stored, defaults to /certs") - disableCertRotation = flag.Bool("disable-cert-rotation", false, "disable automatic generation and rotation of webhook TLS certificates/keys") - enableProfile = flag.Bool("enable-pprof", false, "enable pprof profiling") - profilePort = flag.Int("pprof-port", 6060, "port for pprof profiling. defaulted to 6060 if unspecified") - certServiceName = flag.String("cert-service-name", "gatekeeper-webhook-service", "The service name used to generate the TLS cert's hostname. Defaults to gatekeeper-webhook-service") - enableTLSHealthcheck = flag.Bool("enable-tls-healthcheck", false, "enable probing webhook API with certificate stored in certDir") - disabledBuiltins = util.NewFlagSet() - enableK8sCel = flag.Bool("experimental-enable-k8s-native-validation", false, "PROTOTYPE (not stable): enable the validating admission policy driver") + logFile = flag.String("log-file", "", "Log to file, if specified. Default is to log to stderr.") + logLevel = flag.String("log-level", "INFO", "Minimum log level. For example, DEBUG, INFO, WARNING, ERROR. Defaulted to INFO if unspecified.") + logLevelKey = flag.String("log-level-key", "level", "JSON key for the log level field, defaults to `level`") + logLevelEncoder = flag.String("log-level-encoder", "lower", "Encoder for the value of the log level field. Valid values: [`lower`, `capital`, `color`, `capitalcolor`], default: `lower`") + healthAddr = flag.String("health-addr", ":9090", "The address to which the health endpoint binds.") + metricsAddr = flag.String("metrics-addr", "0", "The address the metric endpoint binds to.") + port = flag.Int("port", 443, "port for the server. defaulted to 443 if unspecified ") + host = flag.String("host", "", "the host address the webhook server listens on. defaults to all addresses.") + certDir = flag.String("cert-dir", "/certs", "The directory where certs are stored, defaults to /certs") + disableCertRotation = flag.Bool("disable-cert-rotation", false, "disable automatic generation and rotation of webhook TLS certificates/keys") + enableProfile = flag.Bool("enable-pprof", false, "enable pprof profiling") + profilePort = flag.Int("pprof-port", 6060, "port for pprof profiling. defaulted to 6060 if unspecified") + certServiceName = flag.String("cert-service-name", "gatekeeper-webhook-service", "The service name used to generate the TLS cert's hostname. Defaults to gatekeeper-webhook-service") + enableTLSHealthcheck = flag.Bool("enable-tls-healthcheck", false, "enable probing webhook API with certificate stored in certDir") + disabledBuiltins = util.NewFlagSet() + enableK8sCel = flag.Bool("experimental-enable-k8s-native-validation", false, "PROTOTYPE (not stable): enable the validating admission policy driver") + externaldataProviderResponseCacheTTL = flag.Duration("external-data-provider-response-cache-ttl", 3*time.Minute, "TTL for the external data provider response cache. Specify the duration in 'h', 'm', or 's' for hours, minutes, or seconds respectively. Defaults to 3 minutes if unspecified.") ) func init() { @@ -302,14 +304,15 @@ func innerMain() int { // Setup controllers asynchronously, they will block for certificate generation if needed. 
setupErr := make(chan error) + ctx := ctrl.SetupSignalHandler() go func() { - setupErr <- setupControllers(mgr, sw, tracker, setupFinished) + setupErr <- setupControllers(ctx, mgr, sw, tracker, setupFinished) }() setupLog.Info("starting manager") mgrErr := make(chan error) go func() { - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") mgrErr <- err } @@ -348,7 +351,7 @@ blockingLoop: return 0 } -func setupControllers(mgr ctrl.Manager, sw *watch.ControllerSwitch, tracker *readiness.Tracker, setupFinished chan struct{}) error { +func setupControllers(ctx context.Context, mgr ctrl.Manager, sw *watch.ControllerSwitch, tracker *readiness.Tracker, setupFinished chan struct{}) error { // Block until the setup (certificate generation) finishes. <-setupFinished @@ -360,6 +363,14 @@ func setupControllers(mgr ctrl.Manager, sw *watch.ControllerSwitch, tracker *rea args = append(args, rego.AddExternalDataProviderCache(providerCache)) mutationOpts.ProviderCache = providerCache + if *externaldataProviderResponseCacheTTL <= 0 { + err := fmt.Errorf("invalid value for external-data-provider-response-cache-ttl: %d", *externaldataProviderResponseCacheTTL) + setupLog.Error(err, "unable to create external data provider response cache") + return err + } + providerResponseCache := frameworksexternaldata.NewProviderResponseCache(ctx, *externaldataProviderResponseCacheTTL) + args = append(args, rego.AddExternalDataProviderResponseCache(providerResponseCache)) + certFile := filepath.Join(*certDir, certName) keyFile := filepath.Join(*certDir, keyName) diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 7dd6a0aa86f..a5b020992b8 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.15.1" +const Version = "1.19.1" diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go index 6c9ecc0b538..b841f13a799 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go index f94ea8053c8..4633f5dabd2 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,6 +19,11 @@ // // Manages your Cloud Monitoring data and configurations. // +// # General documentation +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +// // # Example usage // // To get started with this package, create a client. @@ -79,9 +84,6 @@ // Individual methods on the client use the ctx given to them. 
// // To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2" import ( diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json index ad03f6ffbcf..a33cb6fcf53 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json @@ -264,6 +264,35 @@ } } }, + "SnoozeService": { + "clients": { + "grpc": { + "libraryClient": "SnoozeClient", + "rpcs": { + "CreateSnooze": { + "methods": [ + "CreateSnooze" + ] + }, + "GetSnooze": { + "methods": [ + "GetSnooze" + ] + }, + "ListSnoozes": { + "methods": [ + "ListSnoozes" + ] + }, + "UpdateSnooze": { + "methods": [ + "UpdateSnooze" + ] + } + } + } + } + }, "UptimeCheckService": { "clients": { "grpc": { diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go index 4375e1b467f..f1e66ab498c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go index 872d6d3743c..a2e8b3e92c1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go index fb5e35f5d5d..1b823204f98 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/alert.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index f47133df016..93e481095c3 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/alert_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index d6c40bd7f96..2ee80c11fa3 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/common.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index 1bdd70e3d92..90ec564bdb6 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/dropped_labels.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index df1310325f3..fc38be341fe 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/group.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index 2340c1cb9c9..d7c61bd7a5f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/group_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 321e81afe7a..217c3512563 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/metric.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 5939178fbba..abf7710514a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/metric_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index 79e8485c034..b7d10ef82ef 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/mutation_record.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index 722c9805645..a030a5bcb67 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/notification.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 72821e4bdaf..6ccc289a7ad 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/notification_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index 7c62db39ea5..42046afd5e6 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/query_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index 5d162598029..75beee4ba91 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index e714a59b6a2..50a22ccf579 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/service_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go new file mode 100644 index 00000000000..f3f71e12ed9 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -0,0 +1,314 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/monitoring/v3/snooze.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `Snooze` will prevent any alerts from being opened, and close any that +// are already open. The `Snooze` will work on alerts that match the +// criteria defined in the `Snooze`. The `Snooze` will be active from +// `interval.start_time` through `interval.end_time`. +type Snooze struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the `Snooze`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // + // The ID of the `Snooze` will be generated by the system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. This defines the criteria for applying the `Snooze`. See + // `Criteria` for more information. + Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"` + // Required. The `Snooze` will be active from `interval.start_time` through + // `interval.end_time`. + // `interval.start_time` cannot be in the past. There is a 15 second clock + // skew to account for the time it takes for a request to reach the API from + // the UI. 
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Required. A display name for the `Snooze`. This can be, at most, 512 + // unicode characters. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` +} + +func (x *Snooze) Reset() { + *x = Snooze{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze) ProtoMessage() {} + +func (x *Snooze) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze.ProtoReflect.Descriptor instead. +func (*Snooze) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0} +} + +func (x *Snooze) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snooze) GetCriteria() *Snooze_Criteria { + if x != nil { + return x.Criteria + } + return nil +} + +func (x *Snooze) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Snooze) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The +// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s +// whose names are supplied. +type Snooze_Criteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The specific `AlertPolicy` names for the alert that should be snoozed. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] + // + // There is a limit of 10 policies per snooze. This limit is checked during + // snooze creation. + Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *Snooze_Criteria) Reset() { + *x = Snooze_Criteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze_Criteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze_Criteria) ProtoMessage() {} + +func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead. 
+func (*Snooze_Criteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Snooze_Criteria) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, 0x02, 0x0a, 0x06, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x52, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x4a, 0xea, 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 
0x7a, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x7d, 0x42, 0xc3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, + 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_snooze_proto_goTypes = []interface{}{ + (*Snooze)(nil), // 0: google.monitoring.v3.Snooze + (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria + (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval +} +var file_google_monitoring_v3_snooze_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria + 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_proto_init() } +func file_google_monitoring_v3_snooze_proto_init() { + if File_google_monitoring_v3_snooze_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Snooze); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Snooze_Criteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_snooze_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_proto = out.File + file_google_monitoring_v3_snooze_proto_rawDesc = nil + file_google_monitoring_v3_snooze_proto_goTypes = nil + file_google_monitoring_v3_snooze_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go new file mode 100644 index 00000000000..998032fbae2 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -0,0 +1,867 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/monitoring/v3/snooze_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message definition for creating a `Snooze`. Users must provide the body +// of the `Snooze` to be created but must omit the `Snooze` field, `name`. +type CreateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // a `Snooze` should be created. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The `Snooze` to create. Omit the `name` field, as it will be + // filled in by the API. 
+ Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"` +} + +func (x *CreateSnoozeRequest) Reset() { + *x = CreateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnoozeRequest) ProtoMessage() {} + +func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSnoozeRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +// The message definition for listing `Snooze`s associated with the given +// `parent`, satisfying the optional `filter`. +type ListSnoozesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // `Snooze`s should be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional filter to restrict results to the given criteria. The + // following fields are supported. + // + // - `interval.start_time` + // - `interval.end_time` + // + // For example: + // + // ``` + // interval.start_time > "2022-03-11T00:00:00-08:00" AND + // interval.end_time < "2022-03-12T00:00:00-08:00" + // ``` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. The maximum number of results to return for a single query. The + // server may further constrain the maximum number of results returned in a + // single page. The value should be in the range [1, 1000]. If the value given + // is outside this range, the server will decide the number of results to be + // returned. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The `next_page_token` from a previous call to + // `ListSnoozesRequest` to get the next page of results. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnoozesRequest) Reset() { + *x = ListSnoozesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesRequest) ProtoMessage() {} + +func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead. +func (*ListSnoozesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListSnoozesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListSnoozesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListSnoozesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnoozesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The results of a successful `ListSnoozes` call, containing the matching +// `Snooze`s. +type ListSnoozesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Snooze`s matching this list call. + Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"` + // Page token for repeated calls to `ListSnoozes`, to fetch additional pages + // of results. If this is empty or missing, there are no more pages. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnoozesResponse) Reset() { + *x = ListSnoozesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesResponse) ProtoMessage() {} + +func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead. +func (*ListSnoozesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListSnoozesResponse) GetSnoozes() []*Snooze { + if x != nil { + return x.Snoozes + } + return nil +} + +func (x *ListSnoozesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The message definition for retrieving a `Snooze`. Users must specify the +// field, `name`, which identifies the `Snooze`. 
+type GetSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The ID of the `Snooze` to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetSnoozeRequest) Reset() { + *x = GetSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnoozeRequest) ProtoMessage() {} + +func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead. +func (*GetSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetSnoozeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The message definition for updating a `Snooze`. The field, `snooze.name` +// identifies the `Snooze` to be updated. The remainder of `snooze` gives the +// content the `Snooze` in question will be assigned. +// +// What fields can be updated depends on the start time and end time of the +// `Snooze`. +// +// - end time is in the past: These `Snooze`s are considered +// read-only and cannot be updated. +// - start time is in the past and end time is in the future: `display_name` +// and `interval.end_time` can be updated. +// - start time is in the future: `display_name`, `interval.start_time` and +// `interval.end_time` can be updated. +type UpdateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Snooze` to update. Must have the name field present. + Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"` + // Required. The fields to update. + // + // For each field listed in `update_mask`: + // + // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a + // value for that field, the value of the field in the existing `Snooze` + // will be set to the value of the field in the supplied `Snooze`. + // - If the field does not have a value in the supplied `Snooze`, the field + // in the existing `Snooze` is set to its default value. + // + // Fields not listed retain their existing value. + // + // The following are the field names that are accepted in `update_mask`: + // + // - `display_name` + // - `interval.start_time` + // - `interval.end_time` + // + // That said, the start time and end time of the `Snooze` determines which + // fields can legally be updated. Before attempting an update, users should + // consult the documentation for `UpdateSnoozeRequest`, which talks about + // which fields can be updated. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnoozeRequest) Reset() { + *x = UpdateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnoozeRequest) ProtoMessage() {} + +func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 
0x72, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9, + 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 
0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x1f, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x3a, 0x06, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, + 0x7a, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x81, 0x01, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x22, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x30, 0x32, 0x26, 0x2f, 0x76, 0x33, 
0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, + 0x7a, 0x65, 0xda, 0x41, 0x12, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xca, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_service_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_monitoring_v3_snooze_service_proto_goTypes = []interface{}{ + (*CreateSnoozeRequest)(nil), // 0: 
google.monitoring.v3.CreateSnoozeRequest + (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest + (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse + (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest + (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest + (*Snooze)(nil), // 5: google.monitoring.v3.Snooze + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask +} +var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze + 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 4: google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest + 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest + 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest + 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest + 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze + 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse + 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze + 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_service_proto_init() } +func file_google_monitoring_v3_snooze_service_proto_init() { + if File_google_monitoring_v3_snooze_service_proto != nil { + return + } + file_google_monitoring_v3_snooze_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnoozesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnoozesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_service_proto = out.File + file_google_monitoring_v3_snooze_service_proto_rawDesc = nil + file_google_monitoring_v3_snooze_service_proto_goTypes = nil + file_google_monitoring_v3_snooze_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SnoozeServiceClient is the client API for SnoozeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SnoozeServiceClient interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) +} + +type snoozeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient { + return &snoozeServiceClient{cc} +} + +func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) { + out := new(ListSnoozesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnoozeServiceServer is the server API for SnoozeService service. +type SnoozeServiceServer interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) +} + +// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSnoozeServiceServer struct { +} + +func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented") +} +func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented") +} + +func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) { + s.RegisterService(&_SnoozeService_serviceDesc, srv) +} + +func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnoozesRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).GetSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SnoozeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.SnoozeService", + HandlerType: (*SnoozeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSnooze", + Handler: _SnoozeService_CreateSnooze_Handler, + }, + { + MethodName: "ListSnoozes", + Handler: _SnoozeService_ListSnoozes_Handler, + }, + { + MethodName: "GetSnooze", + Handler: _SnoozeService_GetSnooze_Handler, + }, + { + MethodName: "UpdateSnooze", + Handler: _SnoozeService_UpdateSnooze_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/snooze_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index cf2bc553642..a8ccf2dc5c7 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/span_context.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 03b27696317..827fc0cdb11 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/uptime.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index f56305af535..ae698bb6e25 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc v3.21.9 // source: google/monitoring/v3/uptime_service.proto package monitoringpb diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go index 240bdbebd58..2c1439ea136 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go index 0d7af17a0a0..35cd76914ae 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go index 3b5b471cf7e..484acc53a2a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go new file mode 100644 index 00000000000..3f8a70b68b2 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -0,0 +1,400 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newSnoozeClientHook clientHook + +// SnoozeCallOptions contains the retry settings for each method of SnoozeClient. +type SnoozeCallOptions struct { + CreateSnooze []gax.CallOption + ListSnoozes []gax.CallOption + GetSnooze []gax.CallOption + UpdateSnooze []gax.CallOption +} + +func defaultSnoozeGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSnoozeCallOptions() *SnoozeCallOptions { + return &SnoozeCallOptions{ + CreateSnooze: []gax.CallOption{}, + ListSnoozes: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSnooze: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSnooze: []gax.CallOption{}, + } +} + +// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API. +type internalSnoozeClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator + GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) +} + +// SnoozeClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +type SnoozeClient struct { + // The internal transport-dependent client. + internalClient internalSnoozeClient + + // The call options for this service. + CallOptions *SnoozeCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *SnoozeClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SnoozeClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSnooze creates a Snooze that will prevent alerts, which match the provided +// criteria, from being opened. The Snooze applies for a specific time +// interval. +func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.CreateSnooze(ctx, req, opts...) +} + +// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in +// filter, which specifies predicates to match Snoozes. +func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + return c.internalClient.ListSnoozes(ctx, req, opts...) +} + +// GetSnooze retrieves a Snooze by name. +func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.GetSnooze(ctx, req, opts...) +} + +// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the +// given Snooze object. +func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.UpdateSnooze(ctx, req, opts...) +} + +// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type snoozeGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing SnoozeClient + CallOptions **SnoozeCallOptions + + // The gRPC API client. + snoozeClient monitoringpb.SnoozeServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSnoozeClient creates a new snooze service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) { + clientOpts := defaultSnoozeGRPCClientOptions() + if newSnoozeClientHook != nil { + hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) 
+ } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()} + + c := &snoozeGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *snoozeGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 30000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.CreateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...) + it := &SnoozeIterator{} + req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) { + resp := &monitoringpb.ListSnoozesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.ListSnoozes(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSnoozes(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 30000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.GetSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 30000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.UpdateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SnoozeIterator manages a stream of *monitoringpb.Snooze. +type SnoozeIterator struct { + items []*monitoringpb.Snooze + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnoozeIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
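+//
+// Illustrative sketch only, not part of the generated code: the usual way to
+// drain the iterator returned by ListSnoozes, assuming ctx, a SnoozeClient c,
+// and a ListSnoozesRequest req have been set up elsewhere.
+//
+//	it := c.ListSnoozes(ctx, req)
+//	for {
+//		snooze, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: handle the error (e.g. return it).
+//		}
+//		_ = snooze // use the returned Snooze
+//	}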
+func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) { + var item *monitoringpb.Snooze + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnoozeIterator) bufLen() int { + return len(it.items) +} + +func (it *SnoozeIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go index b0da1501c76..831195eaf17 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go index bb4981e0fb4..accff0f5e47 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index 0ad9373b3b5..efedadbea25 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.8.0" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go index 9641f7396f5..0770b91012b 100644 --- a/vendor/cloud.google.com/go/trace/apiv2/doc.go +++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go @@ -21,7 +21,7 @@ // is collected for all App Engine applications by default. Trace data from // other applications can be provided using this API. This library is used to // interact with the Trace API directly. If you are looking to instrument -// your application for Stackdriver Trace, we recommend using OpenCensus. +// your application for Stackdriver Trace, we recommend using OpenTelemetry. // // # General documentation // diff --git a/vendor/cloud.google.com/go/trace/internal/version.go b/vendor/cloud.google.com/go/trace/internal/version.go index 0ad9373b3b5..37e23cdd396 100644 --- a/vendor/cloud.google.com/go/trace/internal/version.go +++ b/vendor/cloud.google.com/go/trace/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.8.0" +const Version = "1.9.0" diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig new file mode 100644 index 00000000000..4a2d9180f96 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 00000000000..588388bda28 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml new file mode 100644 index 00000000000..631e369254d --- /dev/null +++ b/vendor/github.com/go-ini/ini/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 00000000000..d361bbcdf5c --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 00000000000..f3b0dae2d29 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -test.bench=. 
-test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 00000000000..30606d9700a --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,43 @@ +# INI + +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +## Features + +- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +The minimum requirement of Go is **1.13**. + +```sh +$ go get gopkg.in/ini.v1 +``` + +Please add `-u` flag to update in the future. + +## Getting Help + +- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml new file mode 100644 index 00000000000..e02ec84bc05 --- /dev/null +++ b/vendor/github.com/go-ini/ini/codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true + +comment: + layout: 'diff' + +github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go new file mode 100644 index 00000000000..c3a541f1d1b --- /dev/null +++ b/vendor/github.com/go-ini/ini/data_source.go @@ -0,0 +1,76 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" +) + +var ( + _ dataSource = (*sourceFile)(nil) + _ dataSource = (*sourceData)(nil) + _ dataSource = (*sourceReadCloser)(nil) +) + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type %q", s) + } +} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go new file mode 100644 index 00000000000..48b8e66d6d6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/deprecated.go @@ -0,0 +1,22 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +var ( + // Deprecated: Use "DefaultSection" instead. + DEFAULT_SECTION = DefaultSection + // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore = SnackCase +) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 00000000000..f66bc94b8b6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,49 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. 
+type ErrDelimiterNotFound struct { + Line string +} + +// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go new file mode 100644 index 00000000000..f8b22408be5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/file.go @@ -0,0 +1,541 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of one or more INI files in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // To keep track of the index of a section with same name. + // This meta list is only used with non-unique section names are allowed. + sectionIndexes []int + + // Actual data is stored here. + sections map[string][]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } + if len(opts.KeyValueDelimiterOnWrite) == 0 { + opts.KeyValueDelimiterOnWrite = "=" + } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } + + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string][]*Section), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty(opts ...LoadOptions) *File { + var opt LoadOptions + if len(opts) > 0 { + opt = opts[0] + } + + // Ignore error here, we are sure our data is good. + f, _ := LoadSources(opt, []byte("")) + return f +} + +// NewSection creates a new section. 
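+//
+// From a caller's point of view, a hedged sketch (section and key names are
+// illustrative): build a File in memory and write it out.
+//
+//	f := ini.Empty()
+//	sec, err := f.NewSection("server")
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := sec.NewKey("host", "localhost"); err != nil {
+//		// handle error
+//	}
+//	if _, err := f.WriteTo(os.Stdout); err != nil {
+//		// handle error
+//	}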
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. 
+func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. +func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + 
if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go new file mode 100644 index 00000000000..f9d80a682a5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 00000000000..99e7f86511a --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. 
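+//
+// A minimal, illustrative read sketch (the file name, section, and key below
+// are hypothetical):
+//
+//	cfg, err := ini.Load("app.ini")
+//	if err != nil {
+//		// handle error
+//	}
+//	host := cfg.Section("server").Key("host").String()
+//	_ = host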
+package ini + +import ( + "os" + "regexp" + "runtime" + "strings" +) + +const ( + // Maximum allowed depth when recursively substituing variable names. + depthValues = 99 +) + +var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + + // LineBreak is the delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) + + // DefaultHeader explicitly writes default section header. + DefaultHeader = false + + // PrettySection indicates whether to put a line between sections. + PrettySection = true + // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. + PrettyEqual = false + // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatLeft = "" + // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatRight = "" +) + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +func init() { + if runtime.GOOS == "windows" && !inTest { + LineBreak = "\r\n" + } +} + +// LoadOptions contains all customized options used for load data source(s). +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. + SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // AllowNestedValues indicates whether to allow AWS-like nested values. + // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values + AllowNestedValues bool + // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. + // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure + // Relevant quote: Values can also span multiple lines, as long as they are indented deeper + // than the first line of the value. 
+ AllowPythonMultilineValues bool + // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. + // Docs: https://docs.python.org/2/library/configparser.html + // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. + // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. + SpaceBeforeInlineComment bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format + // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool + // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format + // when value is NOT surrounded by any quotes. + // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. + UnescapeValueCommentSymbols bool + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string + // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". + KeyValueDelimiters string + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". + KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string + // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). + PreserveSurroundedQuote bool + // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). + DebugFunc DebugFunc + // ReaderBufferSize is the buffer size of the reader in bytes. + ReaderBufferSize int + // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. + AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. + AllowDuplicateShadowValues bool +} + +// DebugFunc is the type of function called to log parse events. +type DebugFunc func(message string) + +// LoadSources allows caller to apply customized options for loading from data source(s). +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) 
+} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// ShadowLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 00000000000..a19d9f38ef1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,837 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + nestedValues []string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { + return nil + } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +// AddNestedValue adds a nested value to the key. +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. 
Shadow +// keys with empty values are ignored from the returned list. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } + return []string{k.value} + } + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < depthValues; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + // If not found or found the key itself, then search again in default section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + nk, _ = k.s.f.Section("").GetKey(noption) + if nk == nil { + // Stop when no results found in the default section, + // and returns the value as-is. + break + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. 
+func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
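As a rough illustration of the typed accessors and their Must* counterparts defined above (section, key, and default values below are made up for the example):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[limits]\ntimeout = 30s\nretries = 3\nverbose = yes\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("limits")

	// Typed getters return a parse error alongside the value.
	d, err := sec.Key("timeout").Duration()
	fmt.Println(d, err) // 30s <nil>

	// Must* getters swallow the error and fall back to the given default.
	fmt.Println(sec.Key("retries").MustInt(1))  // 3
	fmt.Println(sec.Key("verbose").MustBool())  // true ("yes" is accepted by parseBool)
	fmt.Println(sec.Key("missing").MustInt(42)) // 42, key does not exist
}
```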
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
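A quick sketch of the candidate (In*) and range (Range*) helpers above; the config values are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[app]\nmode = staging\nworkers = 99\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("app")

	// In falls back to the default when the value is not among the candidates.
	mode := sec.Key("mode").In("dev", []string{"dev", "prod"})
	fmt.Println(mode) // dev ("staging" is not a candidate)

	// RangeInt falls back to the default when the value is outside [min, max].
	workers := sec.Key("workers").RangeInt(4, 1, 32)
	fmt.Println(workers) // 4 (99 is out of range)
}
```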
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Bools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
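The list-valued accessors above split a single value on a caller-supplied delimiter, with lenient, valid-only, and strict variants. A minimal sketch (key name and delimiter are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("peers = 10,20,abc,40\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("").Key("peers")

	// Ints maps unparseable items to the zero value.
	fmt.Println(key.Ints(",")) // [10 20 0 40]

	// StrictInts instead fails on the first invalid item.
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict parse failed:", err)
	}
}
```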
+func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidBools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. +func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. 
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000000..44fc526c2cb --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,520 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
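Shadow keys (repeated keys under one section, enabled through LoadOptions.AllowShadows or the ShadowLoad helper) tie together addShadow, ValueWithShadows, and the write path above. A rough sketch with made-up key values:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[ssh]\nIdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")

	// ShadowLoad keeps every occurrence of a repeated key instead of
	// overwriting the earlier value.
	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		panic(err)
	}

	key := cfg.Section("ssh").Key("IdentityFile")
	fmt.Println(key.String())           // first value only
	fmt.Println(key.ValueWithShadows()) // all values, empty ones skipped
}
```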
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + var endIdx int + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. 
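readValue and readPythonMultilines above implement the Python-configparser-style continuation controlled by LoadOptions.AllowPythonMultilineValues. A small sketch of how that option is expected to be used; the data is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// Lines indented deeper than the first line continue the previous value,
	// mirroring Python's configparser behaviour.
	data := []byte("[mail]\nsignature = first line\n  second line\n  third line\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, data)
	if err != nil {
		panic(err)
	}

	// The value spans the indented continuation lines.
	fmt.Println(cfg.Section("mail").Key("signature").String())
}
```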
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. + currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. 
+ p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 00000000000..a3615d820b7 --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
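The parse loop above handles comments, section headers, boolean keys, and auto-increment ("-") keys. A short sketch of the two less obvious cases, using invented section and key names:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[mysqld]\nskip-name-resolve\n\n[steps]\n- = first\n- = second\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, data)
	if err != nil {
		panic(err)
	}

	// A bare key becomes a boolean key with the value "true".
	fmt.Println(cfg.Section("mysqld").Key("skip-name-resolve").MustBool()) // true

	// "-" keys are auto-incremented to #1, #2, ... within their section.
	fmt.Println(cfg.Section("steps").KeyStrings()) // [#1 #2]
}
```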
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 00000000000..a486b2fe0fd --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
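Child sections (names joined with ChildSectionDelimiter, "." by default) and the parent-key fallback in GetKey above are easiest to see with a tiny config; section and key names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[parent]\nretries = 3\n\n[parent.child]\nname = child-a\n")

	cfg, err := ini.Load(data)
	if err != nil {
		panic(err)
	}

	// ChildSections lists sections whose names start with "parent.".
	for _, sec := range cfg.Section("parent").ChildSections() {
		fmt.Println(sec.Name()) // parent.child
	}

	// A key missing from the child is looked up in the parent section.
	fmt.Println(cfg.Section("parent.child").Key("retries").MustInt(0)) // 3
}
```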
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. + // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. 
+func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
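// Illustrative usage sketch (assumed example, not from the vendored source): mapping an
// INI source onto a struct through the `ini` and `delim` tags handled above. The struct,
// key names, and values below are hypothetical.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

type Config struct {
	Name string   `ini:"name"`
	Port int      `ini:"port"`
	Tags []string `ini:"tags" delim:","`
}

func main() {
	src := []byte("name = demo\nport = 8080\ntags = a,b,c\n")
	var cfg Config
	if err := ini.MapTo(&cfg, src); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Name, cfg.Port, cfg.Tags) // demo 8080 [a b c]
}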
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
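// Reverse-direction sketch (assumed example, not from the vendored source): the
// reflect*WithProperType helpers above back the ReflectFrom entry points defined further
// down in this file. Field names, key names, and the use of Empty/WriteTo are hypothetical.
package main

import (
	"os"

	"github.com/go-ini/ini"
)

type Config struct {
	Name string   `ini:"name" comment:"service name"`
	Tags []string `ini:"tags" delim:","`
}

func main() {
	cfg := ini.Empty()
	if err := ini.ReflectFrom(cfg, &Config{Name: "demo", Tags: []string{"a", "b"}}); err != nil {
		panic(err)
	}
	// Writes the default section: a "service name" comment line, name = demo, tags = a,b
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}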
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index aecaff59ee8..b3283b81588 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -1,6 +1,15 @@ // Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). 
// @@ -13,10 +22,9 @@ import ( "crypto/rsa" "crypto/x509" "encoding/gob" + "errors" "fmt" "io" - "io/ioutil" - "log" "net/rpc" "os" "os/exec" @@ -44,17 +52,6 @@ func (c *Connection) Close() error { return werr } -// If ECP Logging is enabled return true -// Otherwise return false -func enableECPLogging() bool { - if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") != "" { - return true - } - - log.SetOutput(ioutil.Discard) - return false -} - func init() { gob.Register(crypto.SHA256) gob.Register(&rsa.PSSOptions{}) @@ -87,7 +84,7 @@ func (k *Key) Close() error { } // Wait for cmd to exit and release resources. Since the process is forcefully killed, this // will return a non-nil error (varies by OS), which we will ignore. - k.cmd.Wait() + _ = k.cmd.Wait() // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. if err := k.client.Close(); err.Error() != "close |0: file already closed" { @@ -110,6 +107,10 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ return } +// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, +// possibly due to missing config or missing binary path. +var ErrCredUnavailable = errors.New("Cred is unavailable") + // Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate // related operations, including signing messages with the private key. // @@ -118,12 +119,14 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ // // The config file also specifies which certificate the signer should use. func Cred(configFilePath string) (*Key, error) { - enableECPLogging() if configFilePath == "" { configFilePath = util.GetDefaultConfigFilePath() } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { + if errors.Is(err, util.ErrConfigUnavailable) { + return nil, ErrCredUnavailable + } return nil, err } k := &Key{ diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index ccef5278a30..1640ec1c9e3 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -1,10 +1,23 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package util provides helper functions for the client. package util import ( "encoding/json" "errors" - "io/ioutil" + "io" "os" "os/user" "path/filepath" @@ -23,14 +36,21 @@ type Libs struct { ECP string `json:"ecp"` } +// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, +// possibly due to entire config missing or missing binary path. 
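// Caller sketch (assumed example): the ErrCredUnavailable sentinel added above lets callers
// treat a missing ECP configuration as "no enterprise certificate" rather than a hard failure.
// The logging and fallback behavior here are assumptions.
package main

import (
	"errors"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	key, err := client.Cred("") // empty path falls back to the default config file location
	if errors.Is(err, client.ErrCredUnavailable) {
		log.Println("ECP not configured; continuing without an enterprise certificate")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	defer key.Close()
	_ = key.Public() // *Key also implements Sign, so it can back a tls.Certificate
}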
+var ErrConfigUnavailable = errors.New("Config is unavailable") + // LoadSignerBinaryPath retrieves the path of the signer binary from the config file. func LoadSignerBinaryPath(configFilePath string) (path string, err error) { jsonFile, err := os.Open(configFilePath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", ErrConfigUnavailable + } return "", err } - byteValue, err := ioutil.ReadAll(jsonFile) + byteValue, err := io.ReadAll(jsonFile) if err != nil { return "", err } @@ -41,7 +61,7 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { } signerBinaryPath := config.Libs.ECP if signerBinaryPath == "" { - return "", errors.New("signer binary path is missing") + return "", ErrConfigUnavailable } return signerBinaryPath, nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d88960b7ef1..10295639c5a 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.7.0" + "v2": "2.7.1" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b75170f2227..41a7ca94d4d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + ## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index aa6be1304f1..ed862c8b398 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -39,6 +39,7 @@ import ( jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" "google.golang.org/api/googleapi" "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" @@ -197,12 +198,12 @@ func (a *APIError) Unwrap() error { // Error returns a readable representation of the APIError. func (a *APIError) Error() string { var msg string - if a.status != nil { - msg = a.err.Error() - } else if a.httpErr != nil { + if a.httpErr != nil { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil { + msg = a.err.Error() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } @@ -236,6 +237,9 @@ func (a *APIError) Metadata() map[string]string { // setDetailsFromError parses a Status error or a googleapi.Error // and sets status and details or httpErr and details, respectively. // It returns false if neither Status nor googleapi.Error can be parsed. 
+// When err is a googleapi.Error, the status of the returned error will +// be set to an Unknown error, rather than nil, since a nil code is +// interpreted as OK in the gRPC status package. func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error @@ -248,6 +252,7 @@ func (a *APIError) setDetailsFromError(err error) bool { case isHTTPErr: a.httpErr = herr a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) default: return false } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 0ba5da1dd1e..936873ec4f8 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.7.0" +const Version = "2.7.1" diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4bab9..06a91f0868b 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5e3e..0c568802164 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,81 +1,32 @@ -// +build windows -// +build !go1.4 - package mousetrap import ( - "fmt" - "os" "syscall" "unsafe" ) -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err } - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil } - 
- ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err } } } -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - // StartedByExplorer returns true if the program was invoked by the user double-clicking // on the executable from explorer.exe // @@ -83,16 +34,9 @@ func getppid() (pid int, err error) { // It does not guarantee that the program was run from a terminal. It only can tell you // whether it was launched from explorer.exe func StartedByExplorer() bool { - ppid, err := getppid() + pe, err := getProcessEntry(syscall.Getppid()) if err != nil { return false } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) } diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c3..00000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/args.go b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/args.go index 59215355bfb..9a7ab8a3e37 100644 --- a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/args.go +++ b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/args.go @@ -92,6 +92,14 @@ func AddExternalDataProviderCache(providerCache *externaldata.ProviderCache) Arg } } +func AddExternalDataProviderResponseCache(providerResponseCache *externaldata.ProviderResponseCache) Arg { + return func(d *Driver) error { + d.providerResponseCache = providerResponseCache + + return nil + } +} + func DisableBuiltins(builtins ...string) Arg { return func(d *Driver) error { if d.compilers.capabilities == nil { diff --git a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/builtin.go b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/builtin.go index 106ea1fce1b..17e161e1791 100644 --- a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/builtin.go +++ b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/builtin.go @@ -2,12 +2,18 @@ package rego import ( "net/http" + "time" "github.com/open-policy-agent/frameworks/constraint/pkg/externaldata" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/rego" ) +const ( + providerResponseAPIVersion = "externaldata.gatekeeper.sh/v1beta1" + providerResponseKind = "ProviderResponse" +) + func externalDataBuiltin(d *Driver) func(bctx rego.BuiltinContext, regorequest *ast.Term) (*ast.Term, error) { return func(bctx rego.BuiltinContext, regorequest *ast.Term) (*ast.Term, error) { var regoReq externaldata.RegoRequest @@ -25,12 +31,78 @@ func externalDataBuiltin(d *Driver) func(bctx rego.BuiltinContext, regorequest * return externaldata.HandleError(http.StatusBadRequest, err) } - externaldataResponse, statusCode, err := d.sendRequestToProvider(bctx.Context, &provider, regoReq.Keys, clientCert) - if err != nil { - return externaldata.HandleError(statusCode, err) + // check provider response cache + var providerRequestKeys []string + var providerResponseStatusCode int + var prepareResponse externaldata.Response + + prepareResponse.Idempotent = true + for _, k := range regoReq.Keys { + cachedResponse, err := d.providerResponseCache.Get( + externaldata.CacheKey{ + ProviderName: regoReq.ProviderName, + Key: k, + }, + ) + if err != nil || time.Since(time.Unix(cachedResponse.Received, 0)) > d.providerResponseCache.TTL { + // key is not found or cache entry is stale, add key to the provider request keys + providerRequestKeys = append(providerRequestKeys, k) + } else { + prepareResponse.Items = append( + prepareResponse.Items, externaldata.Item{ + Key: k, + Value: cachedResponse.Value, + Error: cachedResponse.Error, + }, + ) + + // we are taking conservative approach here, if any of the cached response is not idempotent + // we will mark the whole response as not idempotent + if !cachedResponse.Idempotent { + prepareResponse.Idempotent = false + } + } + } + + if len(providerRequestKeys) > 0 { + externaldataResponse, statusCode, err := 
d.sendRequestToProvider(bctx.Context, &provider, providerRequestKeys, clientCert) + if err != nil { + return externaldata.HandleError(statusCode, err) + } + + for _, item := range externaldataResponse.Response.Items { + d.providerResponseCache.Upsert( + externaldata.CacheKey{ + ProviderName: regoReq.ProviderName, + Key: item.Key, + }, + externaldata.CacheValue{ + Received: time.Now().Unix(), + Value: item.Value, + Error: item.Error, + Idempotent: externaldataResponse.Response.Idempotent, + }, + ) + } + + // we are taking conservative approach here, if any of the response is not idempotent + // we will mark the whole response as not idempotent + if !externaldataResponse.Response.Idempotent { + prepareResponse.Idempotent = false + } + + prepareResponse.Items = append(prepareResponse.Items, externaldataResponse.Response.Items...) + prepareResponse.SystemError = externaldataResponse.Response.SystemError + providerResponseStatusCode = statusCode + } + + providerResponse := &externaldata.ProviderResponse{ + APIVersion: providerResponseAPIVersion, + Kind: providerResponseKind, + Response: prepareResponse, } - regoResponse := externaldata.NewRegoResponse(statusCode, externaldataResponse) + regoResponse := externaldata.NewRegoResponse(providerResponseStatusCode, providerResponse) return externaldata.PrepareRegoResponse(regoResponse) } } diff --git a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/driver.go b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/driver.go index 751b3a8e2a6..514bb4f7a2c 100644 --- a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/driver.go +++ b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego/driver.go @@ -73,6 +73,9 @@ type Driver struct { // providerCache allows Rego to read from external_data in Rego queries. providerCache *externaldata.ProviderCache + // providerResponseCache allows to cache responses from external_data providers. + providerResponseCache *externaldata.ProviderResponseCache + // sendRequestToProvider allows Rego to send requests to the provider specified in external_data. 
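// Wiring sketch (assumed example): constructing the response cache introduced by this patch
// and handing it to the Rego driver. Only NewCache, NewProviderResponseCache, and the two
// Add* driver args come from this change; the TTL, provider name, and key are assumptions.
package main

import (
	"context"
	"time"

	"github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/rego"
	"github.com/open-policy-agent/frameworks/constraint/pkg/externaldata"
)

func main() {
	ctx := context.Background()

	providerCache := externaldata.NewCache()
	responseCache := externaldata.NewProviderResponseCache(ctx, 3*time.Minute)

	driver, err := rego.New(
		rego.AddExternalDataProviderCache(providerCache),
		rego.AddExternalDataProviderResponseCache(responseCache),
	)
	if err != nil {
		panic(err)
	}
	_ = driver

	// Entries are keyed by provider name plus request key and carry a Unix-seconds
	// timestamp that the background goroutine uses for TTL-based invalidation.
	responseCache.Upsert(
		externaldata.CacheKey{ProviderName: "my-provider", Key: "registry.example.com/image"},
		externaldata.CacheValue{Received: time.Now().Unix(), Value: "valid", Idempotent: true},
	)
	if cached, err := responseCache.Get(externaldata.CacheKey{ProviderName: "my-provider", Key: "registry.example.com/image"}); err == nil {
		_ = cached.Value
	}
}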
sendRequestToProvider externaldata.SendRequestToProvider diff --git a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/externaldata/cache.go b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/externaldata/cache.go index d69f1e2688c..effa7b2131e 100644 --- a/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/externaldata/cache.go +++ b/vendor/github.com/open-policy-agent/frameworks/constraint/pkg/externaldata/cache.go @@ -1,14 +1,17 @@ package externaldata import ( + "context" "crypto/x509" "encoding/base64" "encoding/pem" "fmt" "net/url" "sync" + "time" "github.com/open-policy-agent/frameworks/constraint/pkg/apis/externaldata/unversioned" + "k8s.io/apimachinery/pkg/util/wait" ) type ProviderCache struct { @@ -16,6 +19,73 @@ type ProviderCache struct { mux sync.RWMutex } +type ProviderResponseCache struct { + cache sync.Map + TTL time.Duration +} + +type CacheKey struct { + ProviderName string + Key string +} + +type CacheValue struct { + Received int64 + Value interface{} + Error string + Idempotent bool +} + +func NewProviderResponseCache(ctx context.Context, ttl time.Duration) *ProviderResponseCache { + providerResponseCache := &ProviderResponseCache{ + cache: sync.Map{}, + TTL: ttl, + } + + go wait.UntilWithContext(ctx, func(ctx context.Context) { + providerResponseCache.invalidateProviderResponseCache(providerResponseCache.TTL) + }, ttl) + + return providerResponseCache +} + +func (c *ProviderResponseCache) Get(key CacheKey) (*CacheValue, error) { + if v, ok := c.cache.Load(key); ok { + value, ok := v.(*CacheValue) + if !ok { + return nil, fmt.Errorf("value is not of type CacheValue") + } + return value, nil + } + return nil, fmt.Errorf("key '%s:%s' is not found in provider response cache", key.ProviderName, key.Key) +} + +func (c *ProviderResponseCache) Upsert(key CacheKey, value CacheValue) { + c.cache.Store(key, &value) +} + +func (c *ProviderResponseCache) Remove(key CacheKey) { + c.cache.Delete(key) +} + +func (c *ProviderResponseCache) invalidateProviderResponseCache(ttl time.Duration) { + c.cache.Range(func(k, v interface{}) bool { + value, ok := v.(*CacheValue) + if !ok { + return false + } + + if time.Since(time.Unix(value.Received, 0)) > ttl { + key, ok := k.(CacheKey) + if !ok { + return false + } + c.Remove(key) + } + return true + }) +} + func NewCache() *ProviderCache { return &ProviderCache{ cache: make(map[string]unversioned.Provider), diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go index 321012d4441..fa882da627d 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go @@ -210,10 +210,12 @@ var DefaultBuiltins = [...]*Builtin{ CryptoSha256, CryptoX509ParseCertificateRequest, CryptoX509ParseRSAPrivateKey, + CryptoX509ParseKeyPair, CryptoHmacMd5, CryptoHmacSha1, CryptoHmacSha256, CryptoHmacSha512, + CryptoHmacEqual, // Graphs WalkBuiltin, @@ -2158,7 +2160,7 @@ var Format = &Builtin{ types.N, types.NewArray([]types.Type{types.N, types.S}, nil), types.NewArray([]types.Type{types.N, types.S, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string (see golang supported time formats)"), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone 
string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), ), types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), ), @@ -2288,6 +2290,17 @@ var CryptoX509ParseCertificateRequest = &Builtin{ ), } +var CryptoX509ParseKeyPair = &Builtin{ + Name: "crypto.x509.parse_keypair", + Description: "Returns a valid key pair", + Decl: types.NewFunction( + types.Args( + types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), + types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), + ), +} var CryptoX509ParseRSAPrivateKey = &Builtin{ Name: "crypto.x509.parse_rsa_private_key", Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", @@ -2380,6 +2393,18 @@ var CryptoHmacSha512 = &Builtin{ ), } +var CryptoHmacEqual = &Builtin{ + Name: "crypto.hmac.equal", + Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", + Decl: types.NewFunction( + types.Args( + types.Named("mac1", types.S).Description("mac1 to compare"), + types.Named("mac2", types.S).Description("mac2 to compare"), + ), + types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"), + ), +} + /** * Graphs. */ diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go index a2edbe66fa1..217ab78856e 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/check.go +++ b/vendor/github.com/open-policy-agent/opa/ast/check.go @@ -269,7 +269,7 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { } if tpe != nil { - env.tree.Put(path, tpe) + env.tree.Insert(path, tpe) } } diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go index 7292926acc7..1f99e096924 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compile.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go @@ -873,6 +873,7 @@ func (c *Compiler) checkRuleConflicts() { arities := make(map[int]struct{}, len(node.Values)) name := "" var singleValueConflicts []Ref + var multiValueConflicts []Ref for _, rule := range node.Values { r := rule.(*Rule) @@ -891,6 +892,9 @@ func (c *Compiler) checkRuleConflicts() { // // data.p.q[r] { r := input.r } # data.p.q could be { "r": true } // data.p.q.r.s { true } + // + // data.p[r] := x { r = input.key; x = input.bar } + // data.p.q[r] := x { r = input.key; x = input.bar } // But this is allowed: // data.p.q[r] = 1 { r := "r" } @@ -899,7 +903,25 @@ func (c *Compiler) checkRuleConflicts() { if r.Head.RuleKind() == SingleValue && len(node.Children) > 0 { if len(ref) > 1 && !ref[len(ref)-1].IsGround() { // p.q[x] and p.q.s.t => check grandchildren for _, c := range node.Children { + grandchildrenFound := false + + if len(c.Values) > 0 { + childRules := extractRules(c.Values) + for _, childRule := range childRules { + childRef := childRule.Ref() + if childRule.Head.RuleKind() == SingleValue && 
!childRef[len(childRef)-1].IsGround() { + // The child is a partial object rule, so it's effectively "generating" grandchildren. + grandchildrenFound = true + break + } + } + } + if len(c.Children) > 0 { + grandchildrenFound = true + } + + if grandchildrenFound { singleValueConflicts = node.flattenChildren() break } @@ -908,12 +930,24 @@ func (c *Compiler) checkRuleConflicts() { singleValueConflicts = node.flattenChildren() } } + + // Multi-value rules may not have any other rules in their extent; e.g.: + // + // data.p[v] { v := ... } + // data.p.q := 42 # In direct conflict with data.p[v], which is constructing a set and cannot have values assigned to a sub-path. + + if r.Head.RuleKind() == MultiValue && len(node.Children) > 0 { + multiValueConflicts = node.flattenChildren() + } } switch { case singleValueConflicts != nil: c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "single-value rule %v conflicts with %v", name, singleValueConflicts)) + case multiValueConflicts != nil: + c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multi-value rule %v conflicts with %v", name, multiValueConflicts)) + case len(kinds) > 1 || len(arities) > 1: c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) @@ -1890,7 +1924,7 @@ func isPrintCall(x *Expr) bool { return x.IsCall() && x.Operator().Equal(Print.Ref()) } -// rewriteTermsInHead will rewrite rules so that the head does not contain any +// rewriteRefsInHead will rewrite rules so that the head does not contain any // terms that require evaluation (e.g., refs or comprehensions). If the key or // value contains one or more of these terms, the key or value will be moved // into the body and assigned to a new variable. The new variable will replace diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go index e3a4e442843..d5a0706614d 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/env.go +++ b/vendor/github.com/open-policy-agent/opa/ast/env.go @@ -304,6 +304,116 @@ func (n *typeTreeNode) Put(path Ref, tpe types.Type) { curr.value = tpe } +// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. +// If an types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. +// path must be ground. +func (n *typeTreeNode) Insert(path Ref, tpe types.Type) { + curr := n + for i, term := range path { + c, ok := curr.children.Get(term.Value) + + var child *typeTreeNode + if !ok { + child = newTypeTree() + child.key = term.Value + curr.children.Put(child.key, child) + } else { + child = c.(*typeTreeNode) + + if child.value != nil && i+1 < len(path) { + // If child has an object value, merge the new value into it. 
+ if o, ok := child.value.(*types.Object); ok { + var err error + child.value, err = insertIntoObject(o, path[i+1:], tpe) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } + } + + curr = child + } + + curr.value = tpe + + if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { + // merge all leafs into the inserted object + leafs := curr.Leafs() + for p, t := range leafs { + var err error + curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } +} + +func insertIntoObject(o *types.Object, path Ref, tpe types.Type) (*types.Object, error) { + if len(path) == 0 { + return o, nil + } + + key, err := JSON(path[0].Value) + if err != nil { + return nil, fmt.Errorf("invalid path term %v: %w", path[0], err) + } + + if len(path) == 1 { + for _, prop := range o.StaticProperties() { + if util.Compare(prop.Key, key) == 0 { + prop.Value = types.Or(prop.Value, tpe) + return o, nil + } + } + staticProps := append(o.StaticProperties(), types.NewStaticProperty(key, tpe)) + return types.NewObject(staticProps, o.DynamicProperties()), nil + } + + for _, prop := range o.StaticProperties() { + if util.Compare(prop.Key, key) == 0 { + if propO := prop.Value.(*types.Object); propO != nil { + prop.Value, err = insertIntoObject(propO, path[1:], tpe) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("cannot insert into non-object type %v", prop.Value) + } + return o, nil + } + } + + child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe) + if err != nil { + return nil, err + } + staticProps := append(o.StaticProperties(), types.NewStaticProperty(key, child)) + return types.NewObject(staticProps, o.DynamicProperties()), nil +} + +func (n *typeTreeNode) Leafs() map[*Ref]types.Type { + leafs := map[*Ref]types.Type{} + n.children.Iter(func(k, v util.T) bool { + collectLeafs(v.(*typeTreeNode), nil, leafs) + return false + }) + return leafs +} + +func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { + nPath := append(path, NewTerm(n.key)) + if n.Leaf() { + leafs[&nPath] = n.Value() + return + } + n.children.Iter(func(k, v util.T) bool { + collectLeafs(v.(*typeTreeNode), nPath, leafs) + return false + }) +} + func (n *typeTreeNode) Value() types.Type { return n.value } diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go index 407f750ad7b..e14bb72eefc 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/ast/index.go @@ -173,6 +173,7 @@ func (i *baseDocEqIndex) AllRules(resolver ValueResolver) (*IndexResult, error) result := NewIndexResult(i.kind) result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs result.Rules = make([]*Rule, 0, len(tr.ordering)) for _, pos := range tr.ordering { @@ -482,8 +483,10 @@ func (node *trieNode) String() string { if len(node.mappers) > 0 { flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) } - if l := node.values.Len(); l > 0 { - flags = append(flags, fmt.Sprintf("%d value(s)", l)) + if node.values != nil { + if l := node.values.Len(); l > 0 { + flags = append(flags, fmt.Sprintf("%d value(s)", l)) + } } return strings.Join(flags, " ") } @@ -697,12 +700,6 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes return node.Traverse(resolver, tr) } - head := arr.Elem(0).Value - - if 
!IsScalar(head) { - return nil - } - if node.any != nil { err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) if err != nil { @@ -710,6 +707,12 @@ func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalRes } } + head := arr.Elem(0).Value + + if !IsScalar(head) { + return nil + } + child, ok := node.scalars.Get(head) if !ok { return nil diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go index edc1b63a8e7..99430a8fc57 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go @@ -258,14 +258,17 @@ func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, erro } head.Value = rhs head.Location = lhs.Location + head.setJSONOptions(lhs.jsonOptions) + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + setJSONOptions(body, &rhs.jsonOptions) return &Rule{ - Location: lhs.Location, - Head: head, - Body: NewBody( - NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location), - ), - Module: module, + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + jsonOptions: lhs.jsonOptions, }, nil } @@ -280,14 +283,18 @@ func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, er } head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) head.Location = term.Location + head.jsonOptions = term.jsonOptions + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + setJSONOptions(body, &term.jsonOptions) return &Rule{ Location: term.Location, Head: head, - Body: NewBody( - NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location), - ), - Module: module, + Body: body, + Module: module, + + jsonOptions: term.jsonOptions, }, nil } @@ -309,14 +316,17 @@ func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, head.Key = ref[1] } head.Location = rhs.Location + head.jsonOptions = rhs.jsonOptions + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + setJSONOptions(body, &rhs.jsonOptions) rule := &Rule{ - Location: rhs.Location, - Head: head, - Body: NewBody( - NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location), - ), - Module: module, + Location: rhs.Location, + Head: head, + Body: body, + Module: module, + jsonOptions: rhs.jsonOptions, } return rule, nil @@ -344,14 +354,17 @@ func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { head.Key = ref[1] } head.Location = term.Location + head.jsonOptions = term.jsonOptions + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + setJSONOptions(body, &term.jsonOptions) rule := &Rule{ - Location: term.Location, - Head: head, - Body: NewBody( - NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location), - ), - Module: module, + Location: term.Location, + Head: head, + Body: body, + Module: module, + jsonOptions: term.jsonOptions, } return rule, nil @@ -377,12 +390,17 @@ func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { head := RefHead(ref, rhs) head.Location = lhs.Location head.Args = Args(call[1:]) + head.jsonOptions = lhs.jsonOptions + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + setJSONOptions(body, &rhs.jsonOptions) 
rule := &Rule{ - Location: lhs.Location, - Head: head, - Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)), - Module: module, + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + jsonOptions: lhs.jsonOptions, } return rule, nil @@ -404,12 +422,17 @@ func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) head.Location = loc head.Args = terms[1:] + head.jsonOptions = terms[0].jsonOptions + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) + setJSONOptions(body, &terms[0].jsonOptions) rule := &Rule{ - Location: loc, - Head: head, - Module: module, - Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)), + Location: loc, + Head: head, + Module: module, + Body: body, + jsonOptions: terms[0].jsonOptions, } return rule, nil } @@ -686,6 +709,16 @@ func setRuleModule(rule *Rule, module *Module) { } } +func setJSONOptions(x interface{}, jsonOptions *JSONOptions) { + vis := NewGenericVisitor(func(x interface{}) bool { + if x, ok := x.(customJSON); ok { + x.setJSONOptions(*jsonOptions) + } + return false + }) + vis.Walk(x) +} + // ParserErrorDetail holds additional details for parser errors. type ParserErrorDetail struct { Line string `json:"line"` diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go index 79e21ccae62..c4f560f6e25 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/term.go +++ b/vendor/github.com/open-policy-agent/opa/ast/term.go @@ -1840,22 +1840,34 @@ func ObjectTerm(o ...[2]*Term) *Term { } func LazyObject(blob map[string]interface{}) Object { - return &lazyObj{native: blob} + return &lazyObj{native: blob, cache: map[string]Value{}} } type lazyObj struct { strict Object + cache map[string]Value native map[string]interface{} } func (l *lazyObj) force() Object { if l.strict == nil { l.strict = MustInterfaceToValue(l.native).(Object) + // NOTE(jf): a possible performance improvement here would be to check how many + // entries have been realized to AST in the cache, and if some threshold compared to the + // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. + l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. 
} return l.strict } func (l *lazyObj) Compare(other Value) int { + o1 := sortOrder(l) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } return l.force().Compare(other) } @@ -1924,13 +1936,20 @@ func (l *lazyObj) Get(k *Term) *Term { return l.strict.Get(k) } if s, ok := k.Value.(String); ok { + if v, ok := l.cache[string(s)]; ok { + return NewTerm(v) + } + if val, ok := l.native[string(s)]; ok { + var converted Value switch val := val.(type) { case map[string]interface{}: - return NewTerm(&lazyObj{native: val}) + converted = LazyObject(val) default: - return NewTerm(MustInterfaceToValue(val)) + converted = MustInterfaceToValue(val) } + l.cache[string(s)] = converted + return NewTerm(converted) } } return nil @@ -1985,13 +2004,20 @@ func (l *lazyObj) Find(path Ref) (Value, error) { return l, nil } if p0, ok := path[0].Value.(String); ok { + if v, ok := l.cache[string(p0)]; ok { + return v.Find(path[1:]) + } + if v, ok := l.native[string(p0)]; ok { + var converted Value switch v := v.(type) { case map[string]interface{}: - return (&lazyObj{native: v}).Find(path[1:]) + converted = LazyObject(v) default: - return MustInterfaceToValue(v).Find(path[1:]) + converted = MustInterfaceToValue(v) } + l.cache[string(p0)] = converted + return converted.Find(path[1:]) } } return nil, errFindNotFound diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go index 83c6259c6c8..5567a2ae591 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go @@ -391,12 +391,14 @@ type Reader struct { verificationConfig *VerificationConfig skipVerify bool processAnnotations bool + jsonOptions *ast.JSONOptions capabilities *ast.Capabilities files map[string]FileInfo // files in the bundle signature payload sizeLimitBytes int64 etag string lazyLoadingMode bool name string + persist bool } // NewReader is deprecated. Use NewCustomReader instead. @@ -460,6 +462,12 @@ func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { return r } +// WithJSONOptions sets the JSONOptions to use when parsing policy files +func (r *Reader) WithJSONOptions(opts *ast.JSONOptions) *Reader { + r.jsonOptions = opts + return r +} + // WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger // than this, an error will be returned by the reader. func (r *Reader) WithSizeLimitBytes(n int64) *Reader { @@ -488,10 +496,17 @@ func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { return r } +// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. 
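// Reader-configuration sketch (assumed example): chaining the options added above on a custom
// bundle reader. The tarball source, the empty JSONOptions value, and the function shape are
// assumptions; WithJSONOptions and WithBundlePersistence are the options introduced here.
package example

import (
	"bytes"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

// LoadBundle reads a gzipped bundle tarball, forwarding JSON parser options and marking the
// bundle as one that will later be persisted to disk (which rejects delta bundles, per above).
func LoadBundle(tarGz []byte) (bundle.Bundle, error) {
	loader := bundle.NewTarballLoaderWithBaseURL(bytes.NewReader(tarGz), "")
	return bundle.NewCustomReader(loader).
		WithJSONOptions(&ast.JSONOptions{}).
		WithBundlePersistence(true).
		Read()
}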
+func (r *Reader) WithBundlePersistence(persist bool) *Reader {
+	r.persist = persist
+	return r
+}
+
 func (r *Reader) ParserOptions() ast.ParserOptions {
 	return ast.ParserOptions{
 		ProcessAnnotation: r.processAnnotations,
 		Capabilities:      r.capabilities,
+		JSONOptions:       r.jsonOptions,
 	}
 }
 
@@ -595,7 +610,7 @@ func (r *Reader) Read() (Bundle, error) {
 			var value interface{}
 
 			r.metrics.Timer(metrics.RegoDataParse).Start()
-			err := util.NewJSONDecoder(&buf).Decode(&value)
+			err := util.UnmarshalJSON(buf.Bytes(), &value)
 			r.metrics.Timer(metrics.RegoDataParse).Stop()
 
 			if err != nil {
@@ -645,6 +660,10 @@ func (r *Reader) Read() (Bundle, error) {
 		if len(bundle.WasmModules) != 0 {
 			return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found")
 		}
+
+		if r.persist {
+			return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported")
+		}
 	}
 
 	// check if the bundle signatures specify any files that weren't found in the bundle
@@ -1082,10 +1101,14 @@ func (b Bundle) Equal(other Bundle) bool {
 		return false
 	}
 	for i := range b.Modules {
-		if b.Modules[i].URL != other.Modules[i].URL {
+		// To support bundles built from rootless filesystems we ignore a "/" prefix
+		// for URLs and Paths, such that "/file" and "file" are equivalent
+		if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) !=
+			strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) {
 			return false
 		}
-		if b.Modules[i].Path != other.Modules[i].Path {
+		if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) !=
+			strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) {
 			return false
 		}
 		if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) {
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go
index 45480f446a9..4892b686316 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/file.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path"
 	"path/filepath"
 	"sort"
 	"strings"
@@ -108,6 +107,14 @@ func (d *Descriptor) Close() error {
 	return err
 }
 
+type PathFormat int64
+
+const (
+	Chrooted PathFormat = iota
+	SlashRooted
+	Passthrough
+)
+
 // DirectoryLoader defines an interface which can be used to load
 // files from a directory by iterating over each one in the tree.
 type DirectoryLoader interface {
@@ -115,23 +122,22 @@ type DirectoryLoader interface {
 	// descriptor should *always* be closed when no longer needed.
 	NextFile() (*Descriptor, error)
 	WithFilter(filter filter.LoaderFilter) DirectoryLoader
+	WithPathFormat(PathFormat) DirectoryLoader
 }
 
 type dirLoader struct {
-	root   string
-	files  []string
-	idx    int
-	filter filter.LoaderFilter
+	root       string
+	files      []string
+	idx        int
+	filter     filter.LoaderFilter
+	pathFormat PathFormat
 }
 
-// NewDirectoryLoader returns a basic DirectoryLoader implementation
-// that will load files from a given root directory path.
-func NewDirectoryLoader(root string) DirectoryLoader {
-
+// Normalize root directory, ex "./src/bundle" -> "src/bundle"
+// We don't need an absolute path, but this makes the joined/trimmed
+// paths more uniform.
+func normalizeRootDirectory(root string) string {
 	if len(root) > 1 {
-		// Normalize relative directories, ex "./src/bundle" -> "src/bundle"
-		// We don't need an absolute path, but this makes the joined/trimmed
-		// paths more uniform.
 		if root[0] == '.' && root[1] == filepath.Separator {
 			if len(root) == 2 {
 				root = root[:1] // "./" -> "."
@@ -140,9 +146,15 @@ func NewDirectoryLoader(root string) DirectoryLoader {
 			}
 		}
 	}
+	return root
+}
 
+// NewDirectoryLoader returns a basic DirectoryLoader implementation
+// that will load files from a given root directory path.
+func NewDirectoryLoader(root string) DirectoryLoader {
 	d := dirLoader{
-		root: root,
+		root:       normalizeRootDirectory(root),
+		pathFormat: Chrooted,
 	}
 	return &d
 }
@@ -153,6 +165,36 @@ func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
 	return d
 }
 
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+	d.pathFormat = pathFormat
+	return d
+}
+
+func formatPath(fileName string, root string, pathFormat PathFormat) string {
+	switch pathFormat {
+	case SlashRooted:
+		if !strings.HasPrefix(fileName, string(filepath.Separator)) {
+			return string(filepath.Separator) + fileName
+		}
+		return fileName
+	case Chrooted:
+		// Trim off the root directory and return path as if chrooted
+		result := strings.TrimPrefix(fileName, filepath.FromSlash(root))
+		if root == "." && filepath.Base(fileName) == ManifestExt {
+			result = fileName
+		}
+		if !strings.HasPrefix(result, string(filepath.Separator)) {
+			result = string(filepath.Separator) + result
+		}
+		return result
+	case Passthrough:
+		fallthrough
+	default:
+		return fileName
+	}
+}
+
 // NextFile iterates to the next file in the directory tree
 // and returns a file Descriptor for the file.
 func (d *dirLoader) NextFile() (*Descriptor, error) {
@@ -187,28 +229,20 @@ func (d *dirLoader) NextFile() (*Descriptor, error) {
 	d.idx++
 	fh := newLazyFile(fileName)
 
-	// Trim off the root directory and return path as if chrooted
-	cleanedPath := strings.TrimPrefix(fileName, filepath.FromSlash(d.root))
-	if d.root == "." && filepath.Base(fileName) == ManifestExt {
-		cleanedPath = fileName
-	}
-
-	if !strings.HasPrefix(cleanedPath, string(os.PathSeparator)) {
-		cleanedPath = string(os.PathSeparator) + cleanedPath
-	}
-
-	f := newDescriptor(path.Join(d.root, cleanedPath), cleanedPath, fh).withCloser(fh)
+	cleanedPath := formatPath(fileName, d.root, d.pathFormat)
+	f := newDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).withCloser(fh)
 	return f, nil
 }
 
 type tarballLoader struct {
-	baseURL string
-	r       io.Reader
-	tr      *tar.Reader
-	files   []file
-	idx     int
-	filter  filter.LoaderFilter
-	skipDir map[string]struct{}
+	baseURL    string
+	r          io.Reader
+	tr         *tar.Reader
+	files      []file
+	idx        int
+	filter     filter.LoaderFilter
+	skipDir    map[string]struct{}
+	pathFormat PathFormat
 }
 
 type file struct {
@@ -221,7 +255,8 @@ type file struct {
 // NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
 func NewTarballLoader(r io.Reader) DirectoryLoader {
 	l := tarballLoader{
-		r: r,
+		r:          r,
+		pathFormat: Passthrough,
 	}
 	return &l
 }
@@ -231,8 +266,9 @@ func NewTarballLoader(r io.Reader) DirectoryLoader {
 // with the baseURL.
 func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
 	l := tarballLoader{
-		baseURL: strings.TrimSuffix(baseURL, "/"),
-		r:       r,
+		baseURL:    strings.TrimSuffix(baseURL, "/"),
+		r:          r,
+		pathFormat: Passthrough,
 	}
 	return &l
 }
@@ -243,6 +279,12 @@ func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
 	return t
 }
 
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+	t.pathFormat = pathFormat
+	return t
+}
+
 // NextFile iterates to the next file in the directory tree
 // and returns a file Descriptor for the file.
 func (t *tarballLoader) NextFile() (*Descriptor, error) {
@@ -329,7 +371,10 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) {
 	f := t.files[t.idx]
 	t.idx++
 
-	return newDescriptor(path.Join(t.baseURL, f.name), f.name, f.reader), nil
+	cleanedPath := formatPath(f.name, "", t.pathFormat)
+	d := newDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
+	return d, nil
+
 }
 
 // Next implements the storage.Iterator interface.
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
index 5f3317392ea..3aa253ee190 100644
--- a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
+++ b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go
@@ -23,16 +23,26 @@ type dirLoaderFS struct {
 	files      []string
 	idx        int
 	filter     filter.LoaderFilter
+	root       string
+	pathFormat PathFormat
 }
 
 // NewFSLoader returns a basic DirectoryLoader implementation
 // that will load files from a fs.FS interface
 func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) {
+	return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil
+}
+
+// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation
+// that will load files from a fs.FS interface at the supplied root
+func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader {
 	d := dirLoaderFS{
 		filesystem: filesystem,
+		root:       normalizeRootDirectory(root),
+		pathFormat: Chrooted,
 	}
 
-	return &d, nil
+	return &d
 }
 
 func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error {
@@ -67,6 +77,12 @@ func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
 	return d
 }
 
+// WithPathFormat specifies how a path is formatted in a Descriptor
+func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
+	d.pathFormat = pathFormat
+	return d
+}
+
 // NextFile iterates to the next file in the directory tree
 // and returns a file Descriptor for the file.
func (d *dirLoaderFS) NextFile() (*Descriptor, error) { @@ -74,7 +90,7 @@ func (d *dirLoaderFS) NextFile() (*Descriptor, error) { defer d.Unlock() if d.files == nil { - err := fs.WalkDir(d.filesystem, defaultFSLoaderRoot, d.walkDir) + err := fs.WalkDir(d.filesystem, d.root, d.walkDir) if err != nil { return nil, fmt.Errorf("failed to list files: %w", err) } @@ -94,7 +110,7 @@ func (d *dirLoaderFS) NextFile() (*Descriptor, error) { return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) } - fileNameWithSlash := fmt.Sprintf("/%s", fileName) - f := newDescriptor(fileNameWithSlash, fileNameWithSlash, fh).withCloser(fh) + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := newDescriptor(cleanedPath, cleanedPath, fh).withCloser(fh) return f, nil } diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.52.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.52.0.json new file mode 100644 index 00000000000..bfa43f4a597 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.52.0.json @@ -0,0 +1,4615 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": 
"json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, 
+ "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": 
"object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + 
"static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + 
"minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.0.json new file mode 100644 index 00000000000..a89d1f859e8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.0.json @@ -0,0 +1,4640 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], 
+ "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + 
{ + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + 
{ + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + 
"type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.1.json new file mode 100644 index 00000000000..a89d1f859e8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.53.1.json @@ -0,0 +1,4640 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + 
"args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, 
+ { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": 
{ + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { 
+ "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + 
"result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": 
"any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + 
"result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.54.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.54.0.json new file mode 100644 index 00000000000..a89d1f859e8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.54.0.json @@ -0,0 +1,4640 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": 
"any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": 
"boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + 
{ + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { 
+ "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/config/config.go b/vendor/github.com/open-policy-agent/opa/config/config.go new file mode 100644 index 00000000000..bb914d8376e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/config/config.go @@ -0,0 +1,257 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package config implements OPA configuration file parsing and validation. +package config + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/internal/ref" + "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/version" +) + +// Config represents the configuration file that OPA can be started with. 
+type Config struct { + Services json.RawMessage `json:"services,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Discovery json.RawMessage `json:"discovery,omitempty"` + Bundle json.RawMessage `json:"bundle,omitempty"` // Deprecated: Use `bundles` instead + Bundles json.RawMessage `json:"bundles,omitempty"` + DecisionLogs json.RawMessage `json:"decision_logs,omitempty"` + Status json.RawMessage `json:"status,omitempty"` + Plugins map[string]json.RawMessage `json:"plugins,omitempty"` + Keys json.RawMessage `json:"keys,omitempty"` + DefaultDecision *string `json:"default_decision,omitempty"` + DefaultAuthorizationDecision *string `json:"default_authorization_decision,omitempty"` + Caching json.RawMessage `json:"caching,omitempty"` + NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"` + PersistenceDirectory *string `json:"persistence_directory,omitempty"` + DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` + Server *struct { + Encoding json.RawMessage `json:"encoding,omitempty"` + } `json:"server,omitempty"` + Storage *struct { + Disk json.RawMessage `json:"disk,omitempty"` + } `json:"storage,omitempty"` + Extra map[string]json.RawMessage `json:"-"` +} + +// ParseConfig returns a valid Config object with defaults injected. The id +// and version parameters will be set in the labels map. +func ParseConfig(raw []byte, id string) (*Config, error) { + // NOTE(sr): based on https://stackoverflow.com/a/33499066/993018 + var result Config + objValue := reflect.ValueOf(&result).Elem() + knownFields := map[string]reflect.Value{} + for i := 0; i != objValue.NumField(); i++ { + jsonName := strings.Split(objValue.Type().Field(i).Tag.Get("json"), ",")[0] + knownFields[jsonName] = objValue.Field(i) + } + + if err := util.Unmarshal(raw, &result.Extra); err != nil { + return nil, err + } + + for key, chunk := range result.Extra { + if field, found := knownFields[key]; found { + if err := util.Unmarshal(chunk, field.Addr().Interface()); err != nil { + return nil, err + } + delete(result.Extra, key) + } + } + if len(result.Extra) == 0 { + result.Extra = nil + } + return &result, result.validateAndInjectDefaults(id) +} + +// PluginNames returns a sorted list of names of enabled plugins. +func (c Config) PluginNames() (result []string) { + if c.Bundle != nil || c.Bundles != nil { + result = append(result, "bundles") + } + if c.Status != nil { + result = append(result, "status") + } + if c.DecisionLogs != nil { + result = append(result, "decision_logs") + } + for name := range c.Plugins { + result = append(result, name) + } + sort.Strings(result) + return result +} + +// PluginsEnabled returns true if one or more plugin features are enabled. +// +// Deprecated. Use PluginNames instead. +func (c Config) PluginsEnabled() bool { + return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 +} + +// DefaultDecisionRef returns the default decision as a reference. +func (c Config) DefaultDecisionRef() ast.Ref { + r, _ := ref.ParseDataPath(*c.DefaultDecision) + return r +} + +// DefaultAuthorizationDecisionRef returns the default authorization decision +// as a reference. +func (c Config) DefaultAuthorizationDecisionRef() ast.Ref { + r, _ := ref.ParseDataPath(*c.DefaultAuthorizationDecision) + return r +} + +// NDBuiltinCacheEnabled returns if the ND builtins cache should be used. 
+func (c Config) NDBuiltinCacheEnabled() bool { + return c.NDBuiltinCache +} + +func (c *Config) validateAndInjectDefaults(id string) error { + + if c.DefaultDecision == nil { + s := defaultDecisionPath + c.DefaultDecision = &s + } + + _, err := ref.ParseDataPath(*c.DefaultDecision) + if err != nil { + return err + } + + if c.DefaultAuthorizationDecision == nil { + s := defaultAuthorizationDecisionPath + c.DefaultAuthorizationDecision = &s + } + + _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision) + if err != nil { + return err + } + + if c.Labels == nil { + c.Labels = map[string]string{} + } + + c.Labels["id"] = id + c.Labels["version"] = version.Version + + return nil +} + +// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured +func (c Config) GetPersistenceDirectory() (string, error) { + if c.PersistenceDirectory == nil { + pwd, err := os.Getwd() + if err != nil { + return "", err + } + return filepath.Join(pwd, ".opa"), nil + } + return *c.PersistenceDirectory, nil +} + +// ActiveConfig returns OPA's active configuration +// with the credentials and crypto keys removed +func (c *Config) ActiveConfig() (interface{}, error) { + bs, err := json.Marshal(c) + if err != nil { + return nil, err + } + + var result map[string]interface{} + if err := util.UnmarshalJSON(bs, &result); err != nil { + return nil, err + } + for k, e := range c.Extra { + var v any + if err := util.UnmarshalJSON(e, &v); err != nil { + return nil, err + } + result[k] = v + } + + if err := removeServiceCredentials(result["services"]); err != nil { + return nil, err + } + + if err := removeCryptoKeys(result["keys"]); err != nil { + return nil, err + } + + return result, nil +} + +func removeServiceCredentials(x interface{}) error { + switch x := x.(type) { + case nil: + return nil + case []interface{}: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + + case map[string]interface{}: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + default: + return fmt.Errorf("illegal service config type: %T", x) + } + + return nil +} + +func removeCryptoKeys(x interface{}) error { + switch x := x.(type) { + case nil: + return nil + case map[string]interface{}: + for _, v := range x { + err := removeKey(v, "key", "private_key") + if err != nil { + return err + } + } + default: + return fmt.Errorf("illegal keys config type: %T", x) + } + + return nil +} + +func removeKey(x interface{}, keys ...string) error { + val, ok := x.(map[string]interface{}) + if !ok { + return fmt.Errorf("type assertion error") + } + + for _, key := range keys { + delete(val, key) + } + + return nil +} + +const ( + defaultDecisionPath = "/system/main" + defaultAuthorizationDecisionPath = "/system/authz/allow" +) diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/format/format.go index dffcea77fe7..22cabd9605b 100644 --- a/vendor/github.com/open-policy-agent/opa/format/format.go +++ b/vendor/github.com/open-policy-agent/opa/format/format.go @@ -732,7 +732,12 @@ func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Co func (w *writer) writeRef(x ast.Ref) { if len(x) > 0 { - w.writeTerm(x[0], nil) + parens := false + _, ok := x[0].Value.(ast.Call) + if ok { + parens = x[0].Location.Text[0] == 40 // Starts with "(" + } + w.writeTermParens(parens, x[0], nil) path := x[1:] for _, t := range path { switch p := t.Value.(type) 
{ @@ -909,12 +914,17 @@ func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc * } func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - if term.Location.Row-loc.Row > 1 { + if term.Location.Row-loc.Row >= 1 { w.endLine() w.startLine() } - comments = w.writeTerm(term, comments) + parens := false + _, ok := term.Value.(ast.Call) + if ok { + parens = term.Location.Text[0] == 40 // Starts with "(" + } + comments = w.writeTermParens(parens, term, comments) w.write(" |") return w.writeComprehensionBody(open, close, body, term.Location, loc, comments) diff --git a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go b/vendor/github.com/open-policy-agent/opa/hooks/hooks.go new file mode 100644 index 00000000000..9659d7b4997 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/hooks/hooks.go @@ -0,0 +1,77 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package hooks + +import ( + "context" + "fmt" + + "github.com/open-policy-agent/opa/config" +) + +// Hook is a hook to be called in some select places in OPA's operation. +// +// The base Hook interface is any, and wherever a hook can occur, the calling code +// will check if your hook implements an appropriate interface. If so, your hook +// is called. +// +// This allows you to only hook in to behavior you care about, and it allows the +// OPA to add more hooks in the future. +// +// All hook interfaces in this package have Hook in the name. Hooks must be safe +// for concurrent use. It is expected that hooks are fast; if a hook needs to take +// time, then copy what you need and ensure the hook is async. +// +// When multiple instances of a hook are provided, they are all going to be executed +// in an unspecified order (it's a map-range call underneath). If you need hooks to +// be run in order, you can wrap them into another hook, and configure that one. +type Hook any + +// Hooks is the type used for every struct in OPA that can work with hooks. +type Hooks struct { + m map[Hook]struct{} // we are NOT providing a stable invocation ordering +} + +// New creates a new instance of Hooks. +func New(hs ...Hook) Hooks { + h := Hooks{m: make(map[Hook]struct{}, len(hs))} + for i := range hs { + h.m[hs[i]] = struct{}{} + } + return h +} + +func (hs Hooks) Each(fn func(Hook)) { + for h := range hs.m { + fn(h) + } +} + +// ConfigHook allows inspecting or rewriting the configuration when the plugin +// manager is processing it. +// Note that this hook is not run when the plugin manager is reconfigured. This +// usually only happens when there's a new config from a discovery bundle, and +// for processing _that_, there's `ConfigDiscoveryHook`. +type ConfigHook interface { + OnConfig(context.Context, *config.Config) (*config.Config, error) +} + +// ConfigHook allows inspecting or rewriting the discovered configuration when +// the discovery plugin is processing it. 
+type ConfigDiscoveryHook interface { + OnConfigDiscovery(context.Context, *config.Config) (*config.Config, error) +} + +func (hs Hooks) Validate() error { + for h := range hs.m { + switch h.(type) { + case ConfigHook, + ConfigDiscoveryHook: // OK + default: + return fmt.Errorf("unknown hook type %T", h) + } + } + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv index 3d7a030acab..aca89887eb9 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv @@ -344,12 +344,18 @@ opa_json_writer_write,opa_json_writer_emit_value opa_json_writer_write,opa_free opa_json_dump,opa_json_writer_write opa_value_dump,opa_json_writer_write +move_freelists,opa_abort +opa_heap_blocks_stash,move_freelists +opa_heap_blocks_restore,move_freelists +opa_malloc,opa_free_bulk_commit opa_malloc,opa_abort +opa_free_bulk_commit,merge_sort_blocks opa_realloc,opa_malloc opa_realloc,memcpy opa_realloc,opa_free opa_builtin_cache_get,opa_abort opa_builtin_cache_set,opa_abort +merge_sort_blocks,merge_sort_blocks opa_memoize_init,opa_malloc opa_memoize_init,opa_object opa_memoize_push,opa_malloc @@ -569,14 +575,14 @@ opa_sets_intersection,opa_value_type opa_sets_intersection,opa_set opa_sets_intersection,opa_set_union opa_sets_intersection,opa_set_intersection -opa_sets_intersection,opa_value_free +opa_sets_intersection,opa_value_free_shallow opa_set_union,opa_value_type opa_set_union,opa_set opa_set_union,opa_set_add opa_sets_union,opa_value_type opa_sets_union,opa_set opa_sets_union,opa_set_add -opa_sets_union,opa_value_free +opa_sets_union,opa_value_free_shallow opa_strings_any_prefix_match,opa_value_type opa_strings_any_prefix_match,opa_value_iter opa_strings_any_prefix_match,opa_value_get @@ -785,10 +791,31 @@ opa_object_keys,opa_strncmp opa_object_keys,opa_value_compare_object opa_object_keys,opa_abort opa_object_keys,opa_value_compare_set -opa_value_free,opa_free +opa_array_free,__opa_value_free +opa_array_free,opa_free +opa_array_free,opa_free_bulk +__opa_value_free,opa_free_bulk +__opa_value_free,opa_free +__opa_value_free,opa_array_free +__opa_value_free,__opa_object_buckets_free +__opa_value_free,__opa_set_buckets_free +__opa_object_buckets_free,opa_free +__opa_object_buckets_free,opa_array_free +__opa_object_buckets_free,__opa_object_buckets_free +__opa_object_buckets_free,__opa_set_buckets_free +__opa_object_buckets_free,__opa_value_free +__opa_object_buckets_free,opa_free_bulk +__opa_set_buckets_free,opa_free +__opa_set_buckets_free,opa_array_free +__opa_set_buckets_free,__opa_object_buckets_free +__opa_set_buckets_free,__opa_set_buckets_free +__opa_set_buckets_free,__opa_value_free +__opa_set_buckets_free,opa_free_bulk +opa_value_free,__opa_value_free +opa_value_free_shallow,__opa_value_free opa_value_merge,opa_malloc opa_value_merge,opa_value_get -opa_value_merge,opa_object_insert +opa_value_merge,__opa_object_insert opa_value_merge,opa_value_merge opa_value_merge,opa_abort opa_value_merge,opa_atoi64 @@ -798,19 +825,12 @@ opa_value_merge,opa_strncmp opa_value_merge,opa_value_compare opa_value_merge,opa_value_compare_object opa_value_merge,opa_value_compare_set -opa_object_insert,opa_value_hash -opa_object_insert,opa_value_compare -opa_object_insert,__opa_object_grow -opa_object_insert,opa_malloc -__opa_object_grow,opa_malloc 
-__opa_object_grow,opa_value_hash -__opa_object_grow,opa_value_compare_number -__opa_object_grow,opa_strncmp -__opa_object_grow,opa_value_compare -__opa_object_grow,opa_value_compare_object -__opa_object_grow,opa_value_compare_set -__opa_object_grow,opa_abort -__opa_object_grow,opa_free +__opa_object_insert,opa_value_hash +__opa_object_insert,opa_value_compare +__opa_object_insert,__opa_value_free +__opa_object_insert,__opa_object_grow +__opa_object_insert,opa_malloc +opa_object_insert,__opa_object_insert opa_boolean,opa_malloc opa_number_ref,opa_malloc opa_number_int,opa_malloc @@ -818,7 +838,7 @@ opa_string,opa_malloc opa_value_shallow_copy_object,opa_malloc opa_value_shallow_copy_object,opa_value_iter opa_value_shallow_copy_object,opa_value_get -opa_value_shallow_copy_object,opa_object_insert +opa_value_shallow_copy_object,__opa_object_insert opa_value_shallow_copy_set,opa_malloc opa_value_shallow_copy_set,opa_value_iter opa_value_shallow_copy_set,opa_set_add @@ -861,15 +881,27 @@ opa_array_with_cap,opa_free opa_object,opa_malloc opa_set,opa_malloc opa_set_with_cap,opa_malloc +__opa_object_grow,opa_malloc +__opa_object_grow,opa_value_hash +__opa_object_grow,opa_value_compare_number +__opa_object_grow,opa_strncmp +__opa_object_grow,opa_value_compare +__opa_object_grow,opa_value_compare_object +__opa_object_grow,opa_value_compare_set +__opa_object_grow,opa_abort +__opa_object_grow,opa_free +opa_object_remove,opa_value_hash +opa_object_remove,opa_value_compare +opa_object_remove,__opa_value_free +opa_object_remove,opa_free_bulk +opa_object_remove,opa_free +opa_string_copy,opa_malloc opa_value_add_path,opa_value_get opa_value_add_path,opa_malloc -opa_value_add_path,opa_object_insert -opa_value_add_path,opa_value_free +opa_value_add_path,__opa_object_insert +opa_value_add_path,__opa_value_free opa_value_remove_path,opa_value_get -opa_value_remove_path,opa_value_hash -opa_value_remove_path,opa_value_compare -opa_value_remove_path,opa_value_free -opa_value_remove_path,opa_free +opa_value_remove_path,opa_object_remove opa_lookup,opa_value_get opa_lookup,opa_value_iter opa_lookup,opa_atoi64 diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm index b2fef4cfa569117b3a986a44c85b2dcb541a9332..70aba781e7b6aa2590f34405e9837b2a717f32bd 100644 GIT binary patch delta 85905 zcmdRX2Ygh;_W#c8ZZ>37E|8Fr0(X}ddWT36E>Z*p3u0H$XL~_W5ZkkCXo_M1FF2^6 zARxs8h#Kr5D%cPe6a^uO1rY@-h>H0Co-_B}-Aw|b@ArPc_m_{^y>n;IoIYpHl>KB+ z-OY>hmkt*GcY<%(CySx!q0G$8U}iuBWguWbDn>L4;lDsY_(MTaQv`!j27@7)9u&2L zB0ZR%9;_v0D7d`6{86NqEeM4}kNWG_tpmm4Jo}8m#@fR(uekF3Vb`30+0_>fyLj|P z7x^{{yHzj}%c0^+FFOCqVHaF>#Xm+|HSDS}=U;WH@9b=^oC}8ij=%g5xSa;^VOe}(@u|5pDt{~P|@{%`%?`S@U`IU!8O6OMRT*?vI|mLh*kFS zDaQ_2H?geDXUfv3G=0S~&_BbM9tej`Uq_j0Y#EDp5t;7i=XCE`R(cj;({F6C-^wIogoSUA+xPbF^m+goΝo&&W6*f1k_fDn7CIXSBrkhM8^ZYpAjxgiaa> z-GoqD`UDA17@yczWj4(HgaMP}p*hqo-udwEiOjlbuQ8&&et3K@vvHSo^ca+mpIg&BtkL}Bc) z_g+<}E_m^s_6GXXW1~H4bnPMsHEDr$las-MguH`3&t7{E#&mqj3*7 zpG4N3_O@F84w?5>LuR3|`?ds`y2P%iUAHnn2UWvQLe(KUKmJ$7!w%sA5N=o0%C>LIPVV*b?7Fet#42Ji;L+C%FsG_ukBo>O6c~{il@Vb{z&n#R z5+dk=tWjDI+fCU$p3YFSe-HATYE#D5NhUT_t z`31E>^Xp2P|1k`>?0rP|fDrx|n3ZT4PvqwII4YKA5u3LWcOvjO&Nn}m*@yD##(pG5 z89%dxg^}=N{K9X3;=jN1UuG)71DxL~mzaTq#OB}xU-i%6qRWN_<L0& zFtP!_A~G~aL?w(?4IAPQ+JEE+W32gx!!JFMVl-#Zsz-R&l-VjVh9?)5!$(m#9E`-b8*zq{PCwF2Ip`!)%cP6 z%7-h`I#z}EaoC!}E(99BG=nNZaNPW+LemMr1CBejuWZ!FZeFwm;o9Ae>)88>#)uiq zFRnL0)||9-LYdF3Vdagn@~*bR#u9s#8IhBgF8|zYAiB?{w|W$oJuQARL%MD>&8lfE 
zAqt{3K-o%NQ-7NL&h8z(#epvp;BSr=5&U1Hm&!_5F2Ax-J;5jz#W>3AziZ?XM~ECW(IsB zApPN9K4)I#<9SwFpTJaVS#kZ^_c@~s4I&%~8=LJN#ob+Mp=ccPwrW}ssUWr)&yb$G zy7{P9Wz?bI)_yN5+rGJJ1F?Mh!%bTYvDkjU*?#e;{YLXM5p_>(Q6MXp+C5toif?!p z_fabf(w*WbyyA)$&Bfy7=`C*%&fBHSs%?Z~uTf@;L=U>k9@4rb|0%-F_=e~UTgk8I zw~l01u$K@K@`MQDH?%H3UQykAQ$A(}FydQjhEw1(?%{DYV6F6(>hGmo#r^JgzF5K1 zsDL1Px9N1rENw8-;J^me_xd=905%T>MTlkqlFd(#&|%qtfd0qgkaC~m13q4GE+g|3skc-5>o4H_;c*NT^Glm zO>DbvQbJpwl~NiBvLI586;us;4YDUEhMPfzcDi*?_gtxUI(3_B294*aZ6q~Z2?uyz zYh8-&(XQXUVV$j{vXMa83`9csgX)11#y9qv-TSrrl0alZ&$Uy)H%wG@83`jjD(E9M z>K_peKtb1ae@*PPrybi+?6#jhw!g`OhDM5X%vFIrX0B}J!m0t58QP*<8?fVJyH(FK#mCDh^*lz1 zt#dPu6ZR{;28gZ8GmaZAGPSueSZUm2-`Tqv-mK_7K;pyS?*Rd6K6Lydai3Ea(zr*%Qm?WcW>H}<+S|FEw*Ga}a6)6XnH@wzil)5V+Y-_JfnylbC! zPAe2oJSRq;DtO*$1-qEk6ysejC^N;_NF@+)g?-I=ZBcpQc?HzltHTc?Kb&VH_dH*58q zO-{uP`f+`{B3D4gh5F43r{YukF}5^blWU;nB>ifJQ*)YroD#3d6;N^K9E~J>;PFTO zxK}?ilKQKO&}>X6`KqRnf~bQIY+tOCWZKXi#arCsPLc`Hr7y%w8EaiSGhWIV>e7P_ zE&#^ppmfq)#&QOI;BlIMoT49NKvQxJ%SRZm*ErJGP4TZ4@vl1_A~wussO#wiP}l0m zP0r(w`f;!GxKlqKa2}^=Moqy-j2chU&odm{pVE&@oyUdxafS1^K|ii{9@px}P5P1W zW>hpa+^_!1edwe89k~REYV}N9ipgrM_LL%%89SE`zYrr97mquyQh5Uq-=M<%u_NBRt6F$+Hgt(mE zMKR~X%q$wD2q|Dvv{H(^ou?y%P#kYXW0}>)f!U!nN?{gCN}Q77q+VjW8-B^%6qiOh z5IeeV*83c&8+@hFRNc`gw`{F1sVm)+e#>3qeKc0%=xIVUsd@J!8m8&a4!C7ItLsd^ z<<9Uv8m`(`jE9|S7&t{ZolO&^(RAI}47Y5OsD?8AmOI1yXhzk}s`O?0Vl2l_PzeFm zh|)3%Aow6{nrZv2LP(k3LhhZUfr7kJ${puL@kqMhr-dJ%)xxQU2&|LLL)a(h z1gOOh)NUTs7~r%4fD#5<;(|@lVBdzGareRub&@3vw}S&0qK~5sQEGg*xGfmrlK>e4 zCydWnE4yNJLdnZ!3-(H|D3~)0ouP2zWmz` zo8phE<9+x=iE=ABwy=^T4-Pe)ftw?WPSOogc$n`bodAX^Iw42pNN3ud7!T_d386!( zImrQ8DP&3xsHmb7+gKd#bdql5#X(9Z>BMD>WIQV2=t9RIPRzmMuM>+9Cm3~IW2S=% z@hAQ@A;w9Fd=lazMj*Tq4Ai!u?c#+bi-;OT$EzA?l-g47(^&Oy3A=jCSJa z4^1TDLwPF`;}tii59Bux4rv4frI7# zB@C0pG)@u7mOf5UGr7pH`Md~>at%N;ngZ;d*jDx=NX2^A2iowQVj0(17<~dgeL6ex z=*$UTUPeRXIU>i4=7^||*6hlBRvCr0#^>}M zh=HDLlf6qSVYHVd&KOsTGKiM~olvy{UbSrEzaCjB{NVRTnvuAg$#cx8&{Ix?LnLy} zlSPpqM@Z%GsYQWANuqKgrg4?ap;h)n%Q#Mc-8ltV&?**qRFYOvmE5pjBp`Kx#>E=lrQToU(M{k2uG z*Ya2KT9he-5=VmgNG;V)5=s|<Ur5%8LRaguXtF~Mqd^lYi4-}{29RuoxAa*zVlJn3 z{!;2dJ2D@Zun8EP7a$QVG)@+i3A-WK!oZ;AGCD>fQzzrf#GmFWG#`Q+-g7i z&${7dl%R)G^C3C+?mz4Ha(7->a*V~9ADZ)ooBDt}rXX-ymMWXzc_@^5^|+t#-jG_A zEpllRtDU^WH=5ZJweXUuQScJq9cJheKv?edL-4f2!jzNA78Yi!LH53|`!=PTxB$g4 zpwXKO(WDrUsiT>6@>nsN?L_q$pq-tdMsRkK07OmhIvQeph)skSO=rM%s*Dd4ZXP2a zX1;(!Rj`IeoS;VB!l_OTd0+`x;wlU$Y>W^V<=Ekt9e*pJ|DNlG4u;%O&Y(feGu$@d z4JPyw-4hA{xEn?k59r53lt)?@QNlNs!J%u+0ZpE&OHYYYT_+eok{w`NbtESUJP9bk zYmzizmn0q{89}yZQolApmk@M0czDEOnv>OFhVCd^fpO9C&NK%9b;KAIAcloJA_Pnn ziCD${BEMX$f^n>{aPhaCNYLsA{#K`JjtF5VZfb`n`)n^RS}ZkCi)XeX zPl^<2RVT$5X$)(o7BrC(r3utOm=tyt`()G)XwC_2Ly zM^uMda%luYby^Cl>b4{t;GuO(eiI1iQvYs4rjB2S8vyoW?h;j_?b zgED*d`L!CRmQwYvsTh6_E;p$O?+AX?4vpv<`;pLwYd=7r#w#==5CWa#ghxHt?j2>fW`V3y?hdDXh`y?TO4KdL`FN&~J zZ?xB6mK9?bYH+;UnuARpHv>%?4^Ln9ubO+pTOU^yh@Zs&pcz$4O{ z-x;$&yq2}*jyShmgz=g-oF+ivQPHpW>4C)<`At8qOXoQfjH0q9e)KcRu0JZLhZ6uf zh*$^;;rPHIX*@)@Mxj$Kuevg??`)!h z=H;vqq~uAwbf|VZ15Jc6MtMsn_F5gr)~J$WUHgMGq`kh0)QvJXk@k+&l!Abt0YWCgH0U)2R|M~%*Iuazsli5aah__4crDPeBystwF=yS-xc zVSDEN$q7CGrjDj(-dCK^^Sr6atw^J}=Vz~K5&oX$xp&-nTtvOkJl_Oe)anQ)A2lUZ)sTsZ;F4b~@FO z9G&xU^eG}BS}3urT_#XO7Nr1zI_p`ztMl?$&+-!xan#HneDy5tr=x&E+J)%evj>*i z97j)FsU;~WHPMdP(@{zcByi-`O3m*}6c|%=isn6$=K!JUy=zyDgJ!F{E8TIL3Dhjr z0~djSse!F8T+`|=%ds#4>j{qBPhy-TdGl%Gp{Q;owf7G zXD$*g^|nMpz%d$k$j#hwZvuP?j;+tiu|K;mq7M)_@cmGF!2^=J-;=bTx2)1GEesO$hCfPu0%1Q++@#*nyZe_E z8#;j_*hkA?94TR*4`f96UWxtk4j19oDU#6tfX^!Xwi{Yv-`wyU^IH)os3DKQKI@xD zCIBq&12PtJ_?cKT&;H|vhTXrN=haE7KZom2nje2#R_1*-_$~J34ZpEzL%kJ{lmg0N 
zcxR7%Bog|^I6i)3elunXjxaFcWdxrkkDX#BIXXw+hfBp#c3D6)&nq>I|E$jn=wxJN1+KNaRM&4DN@lmLspbz#2K9> zH+8SW?UP#tkeiV=7h{)N^P8JB8fY7rOQQ457|$f8XcB|i@1SGK**&6YJ&BsGI4a4ScDv-;Ol}jr(J*k3eIkMqk(`mUTM4+RdYsSAn_c zcf3!+eF*VRQdf@9YgZ7Uz*8Pnwzrh!|C5PD5J>t2EMH|>gPK;(`gsy1nR^nYtV9@_ z_53tR_ETdAlS*4$88O0hjXi8^Q*4U6ZEQ#UeR*t=bGSwC)qoepp{sAlb_`Y!0`{uq z_3S?5YM=SHDA!vU38^^kB!(o~myOGhY1$D@Vrp%0$80c~NH|X@t;@x@HMo#i*qfC|>NN0Raa?QxP==Sfua1&_B;7rZtVo+6Qya8V+jR}!sB zM#mDwT!6&&s{Pn_v#-NM(wCs?A%m{C;-#$5x;M-T5{#L_uCr@R=$^wIV*+{234cJN zec^->*wL&Bc{TVw{P8OLr3rQ0$E62c5*`RO?kFm-1_v(aBr$uaw2Aq#cvTSTYIRi= z$E&fduFP{%vOj3p)6PY640}0ALE+bvxO`-={SO0|ZBxUY=A?%b0mJMF2aP;Ez#HtI z*x7J(1i3}zI^YM|-_pt1dG9u{uf3(7*lXW=%PBRvw$>pGI8^EFZY{k6zs}Vo-x=ou(w_+rXffor&Hyy|wxRgD{kA6JL%YW9-BLaz zR4p~=0nyC9`u1&d_T1&CO@6}08WaoLx#dVSAqa27Z?zdY{&2OOh;3gXy zB(D#{Xn6xvcH1PEApAW^JJ{;0fllVc(8C%!ji&4D^eJsJY(l|3#B2ATLT4QQ6wwF% zqlhx}N{H6AFQ0l@Yv+VBdFh?!|G{bP zah#^j^eV4Un_h+&A5CxWwA074;75#mbHwX*<9k~ro|h6>mWc>GyFn&QQjDApWpIy2 z(=xs>Gd@$I&Fj`4-jLx$q{8}f*@QA20)Q_-^ZQS7d#L1paH}=9fFKW0`&BeXSvu+4r@CSk~Q_-$O&@(Z%&ux*NE1N+)viS0_ej1fwEOD(dm<8+f!P z_Tc+-ntB*g6qN8l<3>=&u~?Fn#zy;&``e3+_R9MkXM5Fr987a}_OJJsRL76r4>SPY zmp(vv(`^usK2YDeU!e+DjNSH*2YU5jn}iDvnL(U?h||KU(I6HYo$P1|?Z@E!s2@u+ z0eyG~(d30Q`{Zis-4+z_0~olta%LpNhm25f=gig}stQogm1$C6sb|Q8N&#fxtO5_n z<+D0gwveT>n!A35mOXG>Y3Ho=Jrj85%8PBob+PmSAI9T77FZGw6k`g7GsR~=SU+?t zAL`m=kA3i@>Uh25!3!ECP>wx-F0bHE9bS=V{l+eRDB89fj94Tz`0;EgO5YZd((<%o zqy6GTO)3-n{X;#g!=(4@R?zCJW}lphPXOzptT3)BvRwF74B5+}+4U7L5iJG+rXH>FGHRK1F2$xsZd0ACk5Xx%lRg8?f)1 zPd9(OF#ilV$ju8{+Pxm>lH;&KXB&b*fYwaxsgFe3X*B%RV(T|vX8lQiYcF2dtmbz1 zxA3*JEkc$WFWB`LxL)dl1@#jUxa4nlzVdH$9;*$v>bPQmU+3ItLRp}iGFeSou$r<^ zHDxK4%2FXXOQ6tRvM2&Bytk;}{7;DvoKt(_Zj5qP$Z($*(X}F0iub&6dh+wsGI|br z@8?WNet!o)`;AYR4_f@Mkev6V{l>DvZP!1ETD*-R#qv7gj&^faMDTqzf8Vp>M!dc3 z*;c6ez_a=I{>-x-vYi;oxuqaNsPC~4J)7H1FICaZnuatAsy?`lJJZ(duD;QlLyzZ5 z;8L%9?iKXd^?5jt^gykcyjjG=&Ct^IgsV=s0r-0R{^xt-t)?;@-Nyb98h2A6$Urm= zqI)^ZmM-=HqudxN^T`AxTr6wWzgtk?WZN#w|`H=jY>Id9E2?juo=`0vwNYM=ae z9UMx(^lg)lZ)%lW^v+G-{@3do*(a<^N4tV|P5b(FS8!Qde!g8!``ljeZVO(<%8& zHW0ws8wlV}8wlXJ8wubG8wudnjRf%051A3;J_5kykA8Ha$YjM&$FbWKoDyDZ7kx}! z_5Ju+vHG4*1{%7j9O{YQziprW$uLI^gTyvUYiPCw6WO-;6Ov4m&4keK&4kd?n+c(! zErigGTL_^Cw~%C-eaZ;+-bx5vzBLgc1hqhD+crWdwE_s)uT;1gG9I=GV05JfRN`Qq zEI&}BOTLE2A!^99$A)l5h6N~6U&MK;3YW%UofL8j-y>;yPam*sfgfd4oEjV4GO{~OXP6o|guaoA)G!eOv^{lV6*6Up zu2VgmisJDr0rvw}h97U)J1GYF$QGYFrwbgG3y^I*IO zzKNxN*tj;^nV1Q82OAOjNHc~BLpwQWI}q# zePF7AEw{A(fNwBG(5FpN&)(yYx{=l+bR4kk%O=&J0T3F4!R_4|bvrl$5iAhSpOX$b zsty~4{=rV2?i?)a^CxA~LZv-5Bk!;P*fy#H;9?CxGJyYtN^mEn>q#o1OC14-N-80Y zBncqVM9NMop$Yl-RN@E-c`D&yq*db25Q0i*%&J!jk*pHlqi&R}66wdXi{luNr0+n6 zX}fASIps*EZNFC|_dg-Vzhc>^{!K%IVOM8J+Hy!w|BkshK>xnEcmP<$|4ruN@cA!m zXEL8FsUrx??eS+inCUYUd|j9yF_vgO4?k|7{`vR1o@U#ydZX(Zsu>&<}A_y1p5tUF) z8ro=xb{b$Kkgg5iOHR~&`-`+N#^MwNLv$F--u^|bJqMs@7=**353Wjo(oIR36Q1K? 
zSfdA|d#Q?Tf7v)j0VjfXo|K{$Q^OyrSGqL8xns48@T8ScUa6t`hE4Tnn^W{MrzWr-BK;ZB=&lsZZT;M4l1U5xX;I$p{eBQ|uKx>&J9F4=cEhhO{x4)wNmV9k!sz4yn8asqf^=3Aw@H!a;}15O8=LGCzG)wO zfTj)z5wzw7z$Vr`9hfKTo{!Cp;Gs58^>rc!8itVG3tP~oyPe&Y76s)5l@sefkltA7 zjxFP|OFo@~Fo-t8`)M60m4#IqizqbAwMrJcVA|Xp+^{=|nvo#s2plYef_j>iRH`QF z?j$lh#9Oezp|s|LeYe~@o!-IK1TL=FBN{gFWD1z8Yx{+?AT1sAg^6Vd0YH*mln6Jc zmwA}I&e=;>TfMWl2!FrXTNr&=;>;t#r3nqDw`E{LRBF#gYhh^8bl$e{w9q{ybG$r2p-6FJ{w8xziYk!D|%-i;RKexa%_t4KnYI6ty4)~kO zKxg=B49WI|2YQ0X^AEJc-%SUK@mKs3iD=$HZd6TJfvy7N)9i+{kdU#D`{hj3o%KtT zfm?|Pp80Ik->hvDCzQE&k3m%sIQqgT&{+vra?p0#K#a4;%zK>Nf<;l*BI{SP=H~!HR=2fMo(u6c*8@^<)lde!XrhWL9T{~hyt8+@Pt`>D+wy1;NcR|4X%OJo-k z#oP@I;r7f!!|PUh%Sz&N-cC(cY;0^N-Rb$qUBaHUyM?D%4AE1pI!M?4HGMMn4vAHx zPi2r@IQ*nYbcz&8vpd52G%T2%qIO4mzFWywd}KZJp6AVMIb?f|%Y?VlL%$h_Q9~S? zBNxMp8N-R7bcSYulTOS}h8KSXDwxKzk`Ss1QGNIdvSz|li;gDb4&ofj&>i1UWMm;VrVN}}#ZXX#j+fC)78ip0DK-m4D8pg;kl4~2Hd1*Q;dBb^h_kQ@&$F{stvIPk zu^$luCc*+Sl=VvO4Ak_9#wX78SHjo*mhOEsyl>gww>hiM2q%~V|f$5AKG_)`(>rAFw! zRKTiXhE!iE8pcvx_C&xCkeiAgEnf4d&RiUc6Zjv3{%g33WchliLd+V~wj^MEu)@LB zInIP}-ih`RO@wE$hx5}ifO=et8#6$spyg~N7}KRH@r#yXjk?$`qO~aTMCykijsy(E z)l9$0GZ5Ob?ioLl_p^Lza6p{z?7!6d;iJ)qH}0s{0-{%49YMoBAPU|cWI5|4$_$Ev z=C9K6E)p8x3S%u932r5z!f?1YigCMvJ^*GMP!|V9NyfJik+Xl1_L}5})xMz2Q7ePu zIPsebqzGJ7sixErd19&Rn<84aU;HptewHhP>!PKvgsjnLX(;*y1zL2x*EmQtsh3hk z69-J!rPLB6TeA`XQMl`~{tUE=ghYE|1$T|=dmdp-)P*6@u8~)o1QsDq6-4r!s(w-^ zF^X;hB`0JoRlkPBSz?JAnkxDg@rIFn3XE_H)^QI%Q-_fL3iWKNXi)_YHqdA|zo&{; zwRJBvr}0^DEzy`BTSGJeS1+gmt?;L77xBOm2MNXCUExt+s(toQ`@_S;*oD9*{ z#im)MwyIgC2U(qs@3o$D)DLr3zMUBaUz>?@{!6GFvp5=c4yiXYMZUacocc0TG^@uu zPY{Q^L{$(|!)Y0RkY4A*MJ-jWvc%xRZ&?J|gh(hsIv2QG+m)J;B`y%ptKYIjN2pg@ zL;On|RF4^=dA?Q>h>Wwbq>LP_lQQlzM633R%1qM=%qLnfY~wdV01X4K7Q?SYQa69L>_SIkf9xPB0v0TFlPV~VpxJ)WOf z8@P5!14-kExbzgYw2r8+ZYdDQCxIxaPvT|4y7F+F1ZkLGqO8J7Q0FEAs2`-1%u-ce zB!+`+dG*9Ocz<<0(Hb9dwpHI~Z+(_jL$gvBUDZajGM{3!>rL~ClIvR2aV-L=GPt(# z^9eMNU`(-argjBAb@mtQr=|MI#8Vzu-bn?zGPQ2%uh2D|)|>Pb9nQqN_4-L=M@7%{ z@ytyGYQ&o1Q5Hx_1J;CN8@QT4Ggt4RJA1Qh8;FO%o!o}vHn3$*L#FrS#;jB`8$&Ri z8i||n=IKTZ>f1)50d%%zW08%*#*HggCoTA*m}?d_74_7NCfMb@R6Wu})T^OAMXD82 zts09cD#m;1tVS1$(MfGvquHRQaN^IaYnqCdPHVpvrq-rE4Me`%l(Ecj1`3v_=bCZ1 z-!~Ht95Z50)Njrx4rne0!6s%jXTBWUg6m$>LNwQPG29>!8QfabQCX%)SMRhCnRRy) zdkyBRh2<6sLExfj2>7vum?NGqzqh4mBhdAlR-&2d4t%L*xHOUmXM-#q4Jx!%!c2!A z)@&`dfST`Gix1G|l{Ua@nfkVk$OWarwixKQs+QPTi;q2R#ff;=qn*IET6Ix7Vd=X} z^0jc6wPTXDwBuSW+A~S-w`Y>lI*0}>9RHKzt`)f`XcsP%hqE&~i0&cB?WtKEMDHTq z9H=9*cHNv0_mp zRQvrH(M`?mBrfTYG&W>#x+R?wFPvjRR*n(M#}|fXQq2F9I<~W@i>}Y;EQ;`PO=rJ?} z@)&+Q;28b(j$@$cOVrY1#Bpg8$+YqrBq-p0QOM zHOi&_k=Fh9~mbZ8#mYl^Dw{@0|FAeb-!7wm>r z!Jd%xI@PQv{ERa)3A+ZnOfabJdL;4TjC`!b7VhJ zSDoBTG%WP=H^s@Iq|lUBX$lulXy+t%b_fW&>A<~C?=+{ToZ+r(Khm?ERxRbsC?{G_Bo`Yx@I- zhwQz5M9sFIR1)Y4b7Z9528vx_fm{++yS}28$-DX>gL|km_kHZ3{&WXTI{bpJxIUJIJXY42H;9YS)ghh-}GdIxkTVVbhaV8B@OCHyH(x* zu>Wp#?f}sk?4LkSOVk?!;LM&5CNRQ}ef;#Pr&A*YGg@KN`4ak*HiZn_-JF`S+*TDe=0Der;HSJh{yv##f! z0X8pF!_E*B`yHA9gy@>3HqK9%L}+d_b|Ok~?O99TY|O}6eKGG@C>;F-8<%=4*JR_z zJu2_vR>RNa?xvq9NSQaC3H7yA%2{04^(-#D`7CjAuUn`$^cygy5ql8z=@Ti)2*u3@ zD7xlD#jdiV*d*;ZTjYTDCTEL?qxRBK``^zIjna3Ja*%oYFhbmNHvH=nwfJnt;M=oB zUcq#h7bW6AvTLxQ7(FWU95LV`j!XC;5CvWYh&`miKQBVR4u;W*MlwnzP~P9re1&?q zVKsL-Dm4coeQEmMoaI#PTj#DnFvuN2b*VvbrgM!buU>#r93@G;^Vu?`kW<-17?D!ZQ_VV&|F)_w3=9M2e9(%{czVg1eh&4jYRhg48 zS6;40O%jjGvMJv|9xlJ+E(A*ezHAC2%;n`*O%>bydWqEXVWrA? 
zHh$ho7rd8Lkma-VITDDH7!xL0Ip5RhI}0()Un|%kvwd*1Tx!v`<;2i%I;!GfJJ%Si zK9~;9K2W}II#j;qeN+Phpw-`6g!Hy?e|bG64hgZfyy!m64Mauxl>0>hdrZrxKOma> zLkH$hD8ovNTJxaT7y90Lc=;hw6#7~}l<#;*+~fCN-8fe~;=WQ%=ZSSDX-9F`C)SoV1|S%v^xLhs)Q_RV`l>pNF33 z{L&(o_mYUVcxoXtf=LD$!txIU8BfiNm!eEugV_U6yZ9wx1TIQ!qHEHLcWz8Sa%^p) zWGrpPvyfM6!xwb=ou3}!N?4>e7LZTYzqm$gg(k)mV+|o;`W^9aenk}h+g(ux?$5hA zceUs^c;Y<58VePSBhYpU!9AUELV^w<;0EM@d4b*`6T#t~ND6Sr(I_Hsp{iI7JAS16 z&}xJ+`0V!@{21(D^6O$~%Y?4}ygQY#Mzj!%RhKoQkhI|pd;;LrYeZCth2=BWk|Wru z!f&G17pmk#Pzk0 zs8`+*t?^m@!#m<6#O(Bzrbbp765Ga}@*(evZnAI(3kuYuY#)8+>5oN^d(@-v37mdc zpT8%HYkf*DG0Isui%D!nOjhrGpt4@|d>_e@&1&fTqQv;bsik>gY*Dk`7ma#-L|UYm z(tO3f59p~Ac?t}4xWSe3(%1lTJS}Dzn+O1nfOzRQKB6bYfA6aLABwu=MIV3?@v$1Y zL9{4v#xTN#+WT;;i?ziu?x#11^N?uB-Y71qvz}n^rU$E~tK7zBHFcx-M|15WY0Ent z00v%4;c5)1{}i|>4})@GaFxhC41VQ{J`xX#Lj4wrdMpv8>b3-&4#>S*{*RBv?Xrdk zQ0?C=+MB#Xg~B^*lNu5ATYum`Iy!GXL0mExmG|EQ?;{V+Q{in8=Y;vH+cwcgmd#fq zw~4`2{?ay4RPz8iGFtAk@<{q9-n~tf)H_JUHT6J7KYM_Sqj^f{*k!K@F)*}AU#+F4 zRbYmThAS#C$^4v{hH>vpwXH(5t*MhgCTyF3x5D^JMYfApgvH6*Mc0Jd(@`8_uZrly z^dUe2O3pL(kKh@eUb-a+mIfAJ%?q~X7NzT=c&j|!Drlv+RX_x_idQjpm5`OnRXTwI zg%0EzxS-1Ez9AKbJ7YN?+(wmj99>LOn)`kcMQZwI;_Q&CMqHA;0}g+=>aqju!BBSI z4uPL+oUg|25a!ua7O;>svygFz1_8Jqi4K~0Sc~JY30-u`a?*>*Ml^5&>gQ-1iA%QclcZve-p2r&NzG#YtZCd%qO*k4t1ILTz{tekEp1+^-nZg{e-~ju zOL7Am71CfAlfxrSwT=Q|`_LccOjP_nwIf=)n(a>UW~4_)$Ioo!CH& zv3o>aQsqbYh~j+5FT@Ru?iOMM)wg>R4J;U!iZb_#1Op2~I+Q(0B#Ub&$)Rd3)8tx? zfmQsj4Q$3<(F(Y(*$Z-hP`mevah|r-Y$x+tCfEyQyD>uW)O{2((qt)dk0egW5bE6n zA}?-?NQh-oJ;!Xm-X{hSBRYOBE=lU7TS6y9765B6r~8P?YOPT^V1NEzbne5}n9q@i ziRh7%5bWwOp~X5IcK*Q8VUKab2p<=LK2=0M?y4U|16K>Nf)p2lV;BOgr`vdv^-Py0 z3xwMy38ZQ*Q}>W$7h<5=RR|jcGSMGJ6VGp)^rIN-J@oulv~u45KuA`pp5M;O*L;ng4uP!eA5R0qw}4A2bHRGXX_b6~=cw?!7fr)LB7kI^^^Ac?>yFrw`Q~LD@jOtd<7l zj|c?jr^p5#&&e$$2cyt|N*FoA-;r&(S;%8OExwz2BL1No9gmgbH zzhGEyy z7o0(6CUPULd#Ki5lLL&0WXTSY=0}cuD9w@`;2s{z zk~v9pQZiMo^18*b=)5&wP9#2M7RbLgxjAwQ zXMKUJJh|bEuH%y%ti00X28r^7h=S~iR1Uv{;W_*h3jd27{?COn9(?bcBG9jDiSNbMqnH z8n8*K^BTzU#E2aYBr|4NLz$~uHkA2P$PdFC$^!3UTKu7%S9q|YEc71QHsXi!laG;C z3r`vTPi=it6ZxVSjGTb2A>PAp#d1K=Z)ANGdFVmdeu+lnBM&$8nt8(w4k=UMh^#P%o88Ui80@7wzqzdM^>+F2 zYNJ&+9zD-MfQ28F;!abm9pm_iCNai+*4*nfq&uZ8)E}y5E#w#CS!J}87kHi0Z0E3} zzSO;is_iZ0%zoZ!DLd3m^yfIyjm~PdlHEL!4Q(Zd462frOLyHShmMAsjsz<8nC)g! 
[... base85-encoded GIT binary patch data for vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm omitted ...]
z?(bq+nbDo>wvUtrhLu74?O|nQpz^$ZC|5kq*%xIPgct0sBuaca*15g*#{#>+zN_pf4S WQk3Vaw1@KtTw4GgX0I(X*#7~R|KXbe diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 0d90c9845af..72e4940dba3 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -28,7 +28,7 @@ import ( const ( opaWasmABIVersionVal = 1 opaWasmABIVersionVar = "opa_wasm_abi_version" - opaWasmABIMinorVersionVal = 2 + opaWasmABIMinorVersionVal = 3 opaWasmABIMinorVersionVar = "opa_wasm_abi_minor_version" ) diff --git a/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/vendor/github.com/open-policy-agent/opa/internal/config/config.go new file mode 100644 index 00000000000..82be84dea06 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/config/config.go @@ -0,0 +1,168 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package config implements helper functions to parse OPA's configuration. +package config + +import ( + "encoding/json" + "fmt" + "os" + "regexp" + "strings" + + "github.com/ghodss/yaml" + + "github.com/open-policy-agent/opa/internal/strvals" + "github.com/open-policy-agent/opa/keys" + "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/plugins/rest" + "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/util" +) + +// ServiceOptions stores the options passed to ParseServicesConfig +type ServiceOptions struct { + Raw json.RawMessage + AuthPlugin rest.AuthPluginLookupFunc + Keys map[string]*keys.Config + Logger logging.Logger + DistributedTacingOpts tracing.Options +} + +// ParseServicesConfig returns a set of named service clients. The service +// clients can be specified either as an array or as a map. Some systems (e.g., +// Helm) do not have proper support for configuration values nested under +// arrays, so just support both here. +func ParseServicesConfig(opts ServiceOptions) (map[string]rest.Client, error) { + + services := map[string]rest.Client{} + + var arr []json.RawMessage + var obj map[string]json.RawMessage + + if err := util.Unmarshal(opts.Raw, &arr); err == nil { + for _, s := range arr { + client, err := rest.New(s, opts.Keys, rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) + if err != nil { + return nil, err + } + services[client.Service()] = client + } + } else if util.Unmarshal(opts.Raw, &obj) == nil { + for k := range obj { + client, err := rest.New(obj[k], opts.Keys, rest.Name(k), rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) + if err != nil { + return nil, err + } + services[client.Service()] = client + } + } else { + // Return error from array decode as that is the default format. + return nil, err + } + + return services, nil +} + +// Load implements configuration file loading. The supplied config file will be +// read from disk (if specified) and overrides will be applied. If no config file is +// specified, the overrides can still be applied to an empty config. 
+func Load(configFile string, overrides []string, overrideFiles []string) ([]byte, error) { + baseConf := map[string]interface{}{} + + // User specified config file + if configFile != "" { + var bytes []byte + var err error + bytes, err = os.ReadFile(configFile) + if err != nil { + return nil, err + } + + processedConf := subEnvVars(string(bytes)) + + if err := yaml.Unmarshal([]byte(processedConf), &baseConf); err != nil { + return nil, fmt.Errorf("failed to parse %s: %s", configFile, err) + } + } + + overrideConf := map[string]interface{}{} + + // User specified a config override via --set + for _, override := range overrides { + processedOverride := subEnvVars(override) + if err := strvals.ParseInto(processedOverride, overrideConf); err != nil { + return nil, fmt.Errorf("failed parsing --set data: %s", err) + } + } + + // User specified a config override value via --set-file + for _, override := range overrideFiles { + reader := func(rs []rune) (interface{}, error) { + bytes, err := os.ReadFile(string(rs)) + value := strings.TrimSpace(string(bytes)) + return value, err + } + if err := strvals.ParseIntoFile(override, overrideConf, reader); err != nil { + return nil, fmt.Errorf("failed parsing --set-file data: %s", err) + } + } + + // Merge together base config file and overrides, prefer the overrides + conf := mergeValues(baseConf, overrideConf) + + // Take the patched config and marshal back to YAML + return yaml.Marshal(conf) +} + +// regex looking for ${...} notation strings +var envRegex = regexp.MustCompile(`(?U:\${.*})`) + +// subEnvVars will look for any environment variables in the passed in string +// with the syntax of ${VAR_NAME} and replace that string with ENV[VAR_NAME] +func subEnvVars(s string) string { + updatedConfig := envRegex.ReplaceAllStringFunc(s, func(s string) string { + // Trim off the '${' and '}' + if len(s) <= 3 { + // This should never happen.. + return "" + } + varName := s[2 : len(s)-1] + + // Lookup the variable in the environment. We play by + // bash rules.. if its undefined we'll treat it as an + // empty string instead of raising an error. + return os.Getenv(varName) + }) + + return updatedConfig +} + +// mergeValues will merge source and destination map, preferring values from the source map +func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} { + for k, v := range src { + // If the key doesn't exist already, then just set the key to that value + if _, exists := dest[k]; !exists { + dest[k] = v + continue + } + nextMap, ok := v.(map[string]interface{}) + // If it isn't another map, overwrite the value + if !ok { + dest[k] = v + continue + } + // Edge case: If the key exists in the destination, but isn't a map + destMap, isMap := dest[k].(map[string]interface{}) + // If the source map has a map for this key, prefer it + if !isMap { + dest[k] = v + continue + } + // If we got to this point, it is a map in both, so merge them + dest[k] = mergeValues(destMap, nextMap) + } + return dest +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/errors/join.go b/vendor/github.com/open-policy-agent/opa/internal/errors/join.go new file mode 100644 index 00000000000..8d8e1f301db --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/errors/join.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.20 + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if errs contains no non-nil values. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go b/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go new file mode 100644 index 00000000000..666f3c783e7 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go @@ -0,0 +1,7 @@ +//go:build go1.20 + +package errors + +import "errors" + +var Join = errors.Join diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go index 889f0fda456..58d10c1f76e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go @@ -716,7 +716,7 @@ func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{ // minLength & maxLength: if currentSubSchema.minLength != nil { - if utf8.RuneCount([]byte(stringValue)) < *currentSubSchema.minLength { + if utf8.RuneCountInString(stringValue) < *currentSubSchema.minLength { result.addInternalError( new(StringLengthGTEError), context, @@ -726,7 +726,7 @@ func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{ } } if currentSubSchema.maxLength != nil { - if utf8.RuneCount([]byte(stringValue)) > *currentSubSchema.maxLength { + if utf8.RuneCountInString(stringValue) > *currentSubSchema.maxLength { result.addInternalError( new(StringLengthLTEError), context, diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index 93c935b3526..9c3fd7332d4 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -1565,16 +1565,14 @@ func (p *Planner) planComprehension(body ast.Body, closureIter planiter, target // below. 
p.vars.Push(map[ast.Var]ir.Local{}) prev := p.curr - p.curr = &ir.Block{} + block := &ir.Block{} + p.curr = block ploc := p.loc - if err := p.planQuery(body, 0, func() error { - return closureIter() - }); err != nil { + if err := p.planQuery(body, 0, closureIter); err != nil { return err } - block := p.curr p.curr = prev p.loc = ploc p.vars.Pop() @@ -1737,11 +1735,12 @@ func (p *Planner) planRefData(virtual *ruletrie, base *baseptr, ref ast.Ref, ind }}, }} p.curr = outerBlock - return p.planRefRec(ref, index+1, func() error { // rest of the ref - p.curr = prev - p.appendStmt(&ir.BlockStmt{Blocks: []*ir.Block{outerBlock}}) - return iter() - }) + if err := p.planRefRec(ref, index+1, iter); err != nil { // rest of the ref + return err + } + p.curr = prev + p.appendStmt(&ir.BlockStmt{Blocks: []*ir.Block{outerBlock}}) + return nil }) } } @@ -2236,6 +2235,7 @@ func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir. var index int // ref[0] is data, ignore +outer: for i := 1; i < len(ref); i++ { index = i r := ref[i] @@ -2259,7 +2259,7 @@ func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir. } } case ast.String: - // take matching children + // take all children that either match or have a var key for _, node := range nodes { if node := node.Get(r); node != nil { nextNodes = append(nextNodes, node) @@ -2272,15 +2272,44 @@ func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir. nodes = nextNodes - // if all nodes have 0 children, abort ref check and optimize - all := true + // if all nodes have rules() > 0, abort ref check and optimize + // NOTE(sr): for a.b[c] = ... and a.b.d = ..., we stop at a.b, as its rules() + // will collect the children rules + // We keep the "all nodes have 0 children" check since it's cheaper and might + // let us break, too. + all := 0 for _, node := range nodes { - all = all && len(node.Children()) == 0 + all += node.ChildrenCount() } - if all { + if all == 0 { p.debugf("ref %s: all nodes have 0 children, break", ref[0:index+1]) break } + + // NOTE(sr): we only need this check for the penultimate part: + // When planning the ref data.pkg.a[input.x][input.y], + // We want to capture this situation: + // a.b[c] := "x" if c := "c" + // a.b.d := "y" + // + // Not this: + // a.b[c] := "x" if c := "c" + // a.d := "y" + // since the length doesn't add up. Even if input.x was "d", the second + // rule (a.d) wouldn't contribute anything to the result, since we cannot + // "dot it". + if index == len(ref)-2 { + for _, node := range nodes { + anyNonGround := false + for _, r := range node.Rules() { + anyNonGround = anyNonGround || !r.Ref().IsGround() + } + if anyNonGround { + p.debugf("ref %s: at least one node has 1+ non-ground ref rules, break", ref[0:index+1]) + break outer + } + } + } } var res [][]*ast.Rule @@ -2295,7 +2324,7 @@ func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir. 
for _, node := range nodes { // we're done with ref, check if there's only ruleset leaves; collect rules if index == len(ref)-1 { - if len(node.Rules()) == 0 && len(node.Children()) > 0 { + if len(node.Rules()) == 0 && node.ChildrenCount() > 0 { p.debugf("no optimization of %s: unbalanced ruletrie", ref) return dont() } diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go index 114a25ae2be..06fa6d158d6 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go @@ -164,6 +164,10 @@ func (t *ruletrie) LookupOrInsert(key ast.Ref) *ruletrie { return t.Insert(key) } +func (t *ruletrie) ChildrenCount() int { + return len(t.children) +} + func (t *ruletrie) Children() []ast.Value { if t == nil { return nil diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go new file mode 100644 index 00000000000..179b5b5d5e7 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go @@ -0,0 +1,148 @@ +package aws + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/big" + "net/http" + "strings" + "time" + + "github.com/open-policy-agent/opa/internal/version" + "github.com/open-policy-agent/opa/logging" +) + +// Values taken from +// https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_GetAuthorizationToken.html +const ( + ecrGetAuthorizationTokenTarget = "AmazonEC2ContainerRegistry_V20150921.GetAuthorizationToken" + ecrEndpointFmt = "https://ecr.%s.amazonaws.com/" +) + +// ECR is used to request tokens from Elastic Container Registry. +type ECR struct { + // endpoint returns the region-specifc ECR endpoint. + // It can be overridden by tests. + endpoint func(region string) string + + // client is used to send authorization tokens requests. + client *http.Client + + logger logging.Logger +} + +func NewECR(logger logging.Logger) *ECR { + return &ECR{ + endpoint: func(region string) string { + return fmt.Sprintf(ecrEndpointFmt, region) + }, + client: &http.Client{}, + logger: logger, + } +} + +// GetAuthorizationToken requests a token that can be used to authenticate image pull requests. 
+func (e *ECR) GetAuthorizationToken(ctx context.Context, creds Credentials, signatureVersion string) (ECRAuthorizationToken, error) { + endpoint := e.endpoint(creds.RegionName) + body := strings.NewReader("{}") + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body) + if err != nil { + return ECRAuthorizationToken{}, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("X-Amz-Target", ecrGetAuthorizationTokenTarget) + req.Header.Set("Accept-Encoding", "identity") + req.Header.Set("Content-Type", "application/x-amz-json-1.1") + req.Header.Set("User-Agent", version.UserAgent) + + e.logger.Debug("Signing ECR authorization token request") + + if err := SignRequest(req, "ecr", creds, time.Now(), signatureVersion); err != nil { + return ECRAuthorizationToken{}, fmt.Errorf("failed to sign request: %w", err) + } + + resp, err := DoRequestWithClient(req, e.client, "ecr get authorization token", e.logger) + if err != nil { + return ECRAuthorizationToken{}, err + } + + var data struct { + AuthorizationData []struct { + AuthorizationToken string `json:"authorizationToken"` + ExpiresAt json.Number `json:"expiresAt"` + } `json:"authorizationData"` + } + if err := json.Unmarshal(resp, &data); err != nil { + return ECRAuthorizationToken{}, fmt.Errorf("failed to unmarshal response: %w", err) + } + + if len(data.AuthorizationData) < 1 { + return ECRAuthorizationToken{}, errors.New("empty authorization data") + } + + // The GetAuthorizationToken request returns a list of tokens for + // backwards compatibility reasons. We should only ever get one token back + // because we don't define any registryIDs in the request. + // See https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_GetAuthorizationToken.html#API_GetAuthorizationToken_ResponseSyntax + resultToken := data.AuthorizationData[0] + + expiresAt, err := parseTimestamp(resultToken.ExpiresAt) + if err != nil { + return ECRAuthorizationToken{}, fmt.Errorf("failed to parse expiresAt: %w", err) + } + + return ECRAuthorizationToken{ + AuthorizationToken: resultToken.AuthorizationToken, + ExpiresAt: expiresAt, + }, nil +} + +// ECRAuthorizationToken can sign requests to AWS ECR. +// +// It corresponds to data returned by the AWS GetAuthorizationToken API. +// See https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_AuthorizationData.html +type ECRAuthorizationToken struct { + AuthorizationToken string + ExpiresAt time.Time +} + +// IsValid returns true if the token is set and not expired. +// It respects a margin of error for time handling and will mark it as expired early. +func (t *ECRAuthorizationToken) IsValid() bool { + const tokenExpirationMargin = 5 * time.Minute + + expired := time.Now().Add(tokenExpirationMargin).After(t.ExpiresAt) + return t.AuthorizationToken != "" && !expired +} + +var millisecondsFloat = new(big.Float).SetInt64(1e3) + +// parseTimestamp parses the AWS format for timestamps. +// The time precision is in milliseconds. +// +// The logic is taken from +// https://github.com/aws/aws-sdk-go/blob/41717ba2c04d3fd03f94d09ea984a10899574935/private/protocol/json/jsonutil/unmarshal.go#L294-L302 +func parseTimestamp(raw json.Number) (time.Time, error) { + s := raw.String() + + float, ok := new(big.Float).SetString(s) + if !ok { + return time.Time{}, fmt.Errorf("not a float: %q", raw) + } + + // The float is expected to be in second resolution with millisecond + // decimal places. 
+ // Multiply by millisecondsFloat to obtain an integer in millisecond + // resolution + ms, _ := float.Mul(float, millisecondsFloat).Int64() + + // Multiply again to obtain nanosecond resolution for time.Unix + ns := ms * 1e6 + + t := time.Unix(0, ns).UTC() + + return t, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index eaa1a3e8c3a..79662b10ff1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -5,9 +5,13 @@ package aws import ( + "bytes" "crypto/hmac" "crypto/sha256" + "errors" "fmt" + "io" + "net/http" "net/url" "sort" "strings" @@ -74,6 +78,42 @@ func sortKeys(strMap map[string][]string) []string { return keys } +// SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials. +func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error { + // General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html + // S3 ref. https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + // APIGateway ref. https://docs.aws.amazon.com/apigateway/api-reference/signing-requests/ + + var body []byte + if req.Body == nil { + body = []byte("") + } else { + var err error + body, err = io.ReadAll(req.Body) + if err != nil { + return errors.New("error getting request body: " + err.Error()) + } + // Since ReadAll consumed the body ReadCloser, we must create a new ReadCloser for the request so that the + // subsequent read starts from the beginning + req.Body = io.NopCloser(bytes.NewReader(body)) + } + + now := theTime.UTC() + + if sigVersion == "4a" { + signedHeaders := SignV4a(req.Header, req.Method, req.URL, body, service, creds, now) + req.Header = signedHeaders + } else { + authHeader, awsHeaders := SignV4(req.Header, req.Method, req.URL, body, service, creds, now) + req.Header.Set("Authorization", authHeader) + for k, v := range awsHeaders { + req.Header.Add(k, v) + } + } + + return nil +} + // SignV4 modifies a map[string][]string of headers to generate an AWS V4 signature + headers based on the config/credentials provided. func SignV4(headers map[string][]string, method string, theURL *url.URL, body []byte, service string, awsCreds Credentials, theTime time.Time) (string, map[string]string) { // General ref. 
https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html @@ -118,7 +158,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] } // the "canonical request" is the normalized version of the AWS service access - // that we're attempting to perform; in this case, a GET from an S3 bucket + // that we're attempting to perform canonicalReq := method + "\n" // HTTP method canonicalReq += theURL.EscapedPath() + "\n" // URI-escaped path canonicalReq += theURL.RawQuery + "\n" // RAW Query String diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go new file mode 100644 index 00000000000..e1e8a949f50 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go @@ -0,0 +1,45 @@ +package aws + +import ( + "errors" + "io" + "net/http" + + "github.com/open-policy-agent/opa/logging" +) + +// DoRequestWithClient is a convenience function to get the body of an http response with +// appropriate error-handling boilerplate and logging. +func DoRequestWithClient(req *http.Request, client *http.Client, desc string, logger logging.Logger) ([]byte, error) { + resp, err := client.Do(req) + if err != nil { + // some kind of catastrophe talking to the service + return nil, errors.New(desc + " HTTP request failed: " + err.Error()) + } + defer resp.Body.Close() + + logger.WithFields(map[string]interface{}{ + "url": req.URL.String(), + "status": resp.Status, + "headers": resp.Header, + }).Debug("Received response from " + desc + " service.") + + if resp.StatusCode != 200 { + if logger.GetLevel() == logging.Debug { + body, err := io.ReadAll(resp.Body) + if err == nil { + logger.Debug("Error response with response body: %s", body) + } + } + // could be 404 for role that's not available, but cover all the bases + return nil, errors.New(desc + " HTTP request returned unexpected status: " + resp.Status) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + // deal with problems reading the body, whatever those might be + return nil, errors.New(desc + " HTTP response body could not be read: " + err.Error()) + } + + return body, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go new file mode 100644 index 00000000000..fc3f43fad29 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go @@ -0,0 +1,213 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package init is an internal package with helpers for data and policy loading during initialization. +package init + +import ( + "context" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/bundle" + storedversion "github.com/open-policy-agent/opa/internal/version" + "github.com/open-policy-agent/opa/loader" + "github.com/open-policy-agent/opa/metrics" + "github.com/open-policy-agent/opa/storage" +) + +// InsertAndCompileOptions contains the input for the operation. +type InsertAndCompileOptions struct { + Store storage.Store + Txn storage.Transaction + Files loader.Result + Bundles map[string]*bundle.Bundle + MaxErrors int + EnablePrintStatements bool +} + +// InsertAndCompileResult contains the output of the operation. 
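Putting the options above together, a hedged sketch of how a caller might drive the operation inside a write transaction; store is any storage.Store, files is a loader.Result produced elsewhere, and the function itself is defined below:

var compiler *ast.Compiler
err := storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
	res, err := InsertAndCompile(ctx, InsertAndCompileOptions{
		Store:     store,
		Txn:       txn,
		Files:     files,
		MaxErrors: -1, // no limit on reported compile errors
	})
	if err != nil {
		return err
	}
	compiler = res.Compiler // keep the compiler for later evaluation
	return nil
})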
+type InsertAndCompileResult struct { + Compiler *ast.Compiler + Metrics metrics.Metrics +} + +// InsertAndCompile writes data and policy into the store and returns a compiler for the +// store contents. +func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*InsertAndCompileResult, error) { + if len(opts.Files.Documents) > 0 { + if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.Path{}, opts.Files.Documents); err != nil { + return nil, fmt.Errorf("storage error: %w", err) + } + } + + policies := make(map[string]*ast.Module, len(opts.Files.Modules)) + + for id, parsed := range opts.Files.Modules { + policies[id] = parsed.Parsed + } + + compiler := ast.NewCompiler(). + SetErrorLimit(opts.MaxErrors). + WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)). + WithEnablePrintStatements(opts.EnablePrintStatements) + m := metrics.New() + + activation := &bundle.ActivateOpts{ + Ctx: ctx, + Store: opts.Store, + Txn: opts.Txn, + Compiler: compiler, + Metrics: m, + Bundles: opts.Bundles, + ExtraModules: policies, + } + + err := bundle.Activate(activation) + if err != nil { + return nil, err + } + + // Policies in bundles will have already been added to the store, but + // modules loaded outside of bundles will need to be added manually. + for id, parsed := range opts.Files.Modules { + if err := opts.Store.UpsertPolicy(ctx, opts.Txn, id, parsed.Raw); err != nil { + return nil, fmt.Errorf("storage error: %w", err) + } + } + + // Set the version in the store last to prevent data files from overwriting. + if err := storedversion.Write(ctx, opts.Store, opts.Txn); err != nil { + return nil, fmt.Errorf("storage error: %w", err) + } + + return &InsertAndCompileResult{Compiler: compiler, Metrics: m}, nil +} + +// LoadPathsResult contains the output loading a set of paths. +type LoadPathsResult struct { + Bundles map[string]*bundle.Bundle + Files loader.Result +} + +// WalkPathsResult contains the output loading a set of paths. +type WalkPathsResult struct { + BundlesLoader []BundleLoader + FileDescriptors []*Descriptor +} + +// BundleLoader contains information about files in a bundle +type BundleLoader struct { + DirectoryLoader bundle.DirectoryLoader + IsDir bool +} + +// Descriptor contains information about a file +type Descriptor struct { + Root string + Path string +} + +// LoadPaths reads data and policy from the given paths and returns a set of bundles or +// raw loader file results. +func LoadPaths(paths []string, + filter loader.Filter, + asBundle bool, + bvc *bundle.VerificationConfig, + skipVerify bool, + processAnnotations bool, + caps *ast.Capabilities, + fsys fs.FS) (*LoadPathsResult, error) { + + if caps == nil { + caps = ast.CapabilitiesForThisVersion() + } + + var result LoadPathsResult + var err error + + if asBundle { + result.Bundles = make(map[string]*bundle.Bundle, len(paths)) + for _, path := range paths { + result.Bundles[path], err = loader.NewFileLoader(). + WithFS(fsys). + WithBundleVerificationConfig(bvc). + WithSkipBundleVerification(skipVerify). + WithFilter(filter). + WithProcessAnnotation(processAnnotations). + WithCapabilities(caps). + AsBundle(path) + if err != nil { + return nil, err + } + } + return &result, nil + } + + files, err := loader.NewFileLoader(). + WithFS(fsys). + WithProcessAnnotation(processAnnotations). + WithCapabilities(caps). 
+ Filtered(paths, filter) + if err != nil { + return nil, err + } + + result.Files = *files + + return &result, nil +} + +// WalkPaths reads data and policy from the given paths and returns a set of bundle directory loaders +// or descriptors that contain information about files. +func WalkPaths(paths []string, filter loader.Filter, asBundle bool) (*WalkPathsResult, error) { + + var result WalkPathsResult + + if asBundle { + result.BundlesLoader = make([]BundleLoader, len(paths)) + for i, path := range paths { + bundleLoader, isDir, err := loader.GetBundleDirectoryLoader(path) + if err != nil { + return nil, err + } + + result.BundlesLoader[i] = BundleLoader{ + DirectoryLoader: bundleLoader, + IsDir: isDir, + } + } + return &result, nil + } + + result.FileDescriptors = []*Descriptor{} + for _, path := range paths { + filePaths, err := loader.FilteredPaths([]string{path}, filter) + if err != nil { + return nil, err + } + + for _, fp := range filePaths { + // Trim off the root directory and return path as if chrooted + cleanedPath := strings.TrimPrefix(fp, path) + if path == "." && filepath.Base(fp) == bundle.ManifestExt { + cleanedPath = fp + } + + if !strings.HasPrefix(cleanedPath, "/") { + cleanedPath = "/" + cleanedPath + } + + result.FileDescriptors = append(result.FileDescriptors, &Descriptor{ + Root: path, + Path: cleanedPath, + }) + } + } + + return &result, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go new file mode 100644 index 00000000000..019dc87bb9b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go @@ -0,0 +1,33 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package strvals provides tools for working with strval lines. + +OPA runtime config supports a compressed format for YAML settings which we call strvals. +The format is roughly like this: + + name=value,topname.subname=value + +The above is equivalent to the YAML document + + name: value + topname: + subname: value + +This package provides a parser and utilities for converting the strvals format +to other formats. +*/ +package strvals diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go new file mode 100644 index 00000000000..0fc8936af16 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go @@ -0,0 +1,431 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strvals + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/ghodss/yaml" +) + +// ErrNotList indicates that a non-list was treated as a list. +var ErrNotList = errors.New("not a list") + +// MaxIndex is the maximum index that will be allowed by setIndex. +// The default value 65536 = 1024 * 64 +var MaxIndex = 65536 + +// ToYAML takes a string of arguments and converts to a YAML document. +func ToYAML(s string) (string, error) { + m, err := Parse(s) + if err != nil { + return "", err + } + d, err := yaml.Marshal(m) + return string(d), err +} + +// Parse parses a set line. +// +// A set line is of the form name1=value1,name2=value2 +func Parse(s string) (map[string]interface{}, error) { + vals := map[string]interface{}{} + scanner := bytes.NewBufferString(s) + t := newParser(scanner, vals, false) + err := t.parse() + return vals, err +} + +// ParseString parses a set line and forces a string value. +// +// A set line is of the form name1=value1,name2=value2 +func ParseString(s string) (map[string]interface{}, error) { + vals := map[string]interface{}{} + scanner := bytes.NewBufferString(s) + t := newParser(scanner, vals, true) + err := t.parse() + return vals, err +} + +// ParseInto parses a strvals line and merges the result into dest. +// +// If the strval string has a key that exists in dest, it overwrites the +// dest version. +func ParseInto(s string, dest map[string]interface{}) error { + scanner := bytes.NewBufferString(s) + t := newParser(scanner, dest, false) + return t.parse() +} + +// ParseIntoFile parses a filevals line and merges the result into dest. +// +// This method always returns a string as the value. +func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal) error { + scanner := bytes.NewBufferString(s) + t := newFileParser(scanner, dest, runesToVal) + return t.parse() +} + +// ParseIntoString parses a strvals line and merges the result into dest. +// +// This method always returns a string as the value. +func ParseIntoString(s string, dest map[string]interface{}) error { + scanner := bytes.NewBufferString(s) + t := newParser(scanner, dest, true) + return t.parse() +} + +// parser is a simple parser that takes a strvals line and parses it into a +// map representation. 
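To make the set-line format concrete, a small sketch of what the Parse entry point above returns for a representative input; scalar conversion (booleans, integers) follows typedVal further down:

vals, err := Parse("name=value,topname.subname=value,port=8080,debug=true")
// err == nil, and vals is equivalent to:
//
//   map[string]interface{}{
//       "name": "value",
//       "topname": map[string]interface{}{
//           "subname": "value",
//       },
//       "port":  int64(8080),
//       "debug": true,
//   }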
+// +// where sc is the source of the original data being parsed +// where data is the final parsed data from the parses with correct types +// where st is a boolean to figure out if we're forcing it to parse values as string +type parser struct { + sc *bytes.Buffer + data map[string]interface{} + runesToVal runesToVal +} + +type runesToVal func([]rune) (interface{}, error) + +func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser { + rs2v := func(rs []rune) (interface{}, error) { + return typedVal(rs, stringBool), nil + } + return &parser{sc: sc, data: data, runesToVal: rs2v} +} + +func newFileParser(sc *bytes.Buffer, data map[string]interface{}, runesToVal runesToVal) *parser { + return &parser{sc: sc, data: data, runesToVal: runesToVal} +} + +func (t *parser) parse() error { + for { + err := t.key(t.data) + if err == nil { + continue + } + if err == io.EOF { + return nil + } + return err + } +} + +func runeSet(r []rune) map[rune]bool { + s := make(map[rune]bool, len(r)) + for _, rr := range r { + s[rr] = true + } + return s +} + +func (t *parser) key(data map[string]interface{}) error { + stop := runeSet([]rune{'=', '[', ',', '.'}) + for { + switch k, last, err := runesUntil(t.sc, stop); { + case err != nil: + if len(k) == 0 { + return err + } + return fmt.Errorf("key %q has no value", string(k)) + //set(data, string(k), "") + //return err + case last == '[': + // We are in a list index context, so we need to set an index. + i, err := t.keyIndex() + if err != nil { + return fmt.Errorf("error parsing index: %s", err) + } + kk := string(k) + // Find or create target list + list := []interface{}{} + if _, ok := data[kk]; ok { + list = data[kk].([]interface{}) + } + + // Now we need to get the value after the ]. + list, err = t.listItem(list, i) + set(data, kk, list) + return err + case last == '=': + //End of key. Consume =, Get value. + // FIXME: Get value list first + vl, e := t.valList() + switch e { + case nil: + set(data, string(k), vl) + return nil + case io.EOF: + set(data, string(k), "") + return e + case ErrNotList: + rs, e := t.val() + if e != nil && e != io.EOF { + return e + } + v, e := t.runesToVal(rs) + set(data, string(k), v) + return e + default: + return e + } + + case last == ',': + // No value given. Set the value to empty string. Return error. + set(data, string(k), "") + return fmt.Errorf("key %q has no value (cannot end with ,)", string(k)) + case last == '.': + // First, create or find the target map. + inner := map[string]interface{}{} + if _, ok := data[string(k)]; ok { + inner = data[string(k)].(map[string]interface{}) + } + + // Recurse + e := t.key(inner) + if len(inner) == 0 { + return fmt.Errorf("key map %q has no value", string(k)) + } + set(data, string(k), inner) + return e + } + } +} + +func set(data map[string]interface{}, key string, val interface{}) { + // If key is empty, don't set it. + if len(key) == 0 { + return + } + data[key] = val +} + +func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) { + // There are possible index values that are out of range on a target system + // causing a panic. This will catch the panic and return an error instead. + // The value of the index that causes a panic varies from system to system. 
+ defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("error processing index %d: %s", index, r) + } + }() + + if index < 0 { + return list, fmt.Errorf("negative %d index not allowed", index) + } + if index > MaxIndex { + return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex) + } + if len(list) <= index { + newlist := make([]interface{}, index+1) + copy(newlist, list) + list = newlist + } + list[index] = val + return list, nil +} + +func (t *parser) keyIndex() (int, error) { + // First, get the key. + stop := runeSet([]rune{']'}) + v, _, err := runesUntil(t.sc, stop) + if err != nil { + return 0, err + } + // v should be the index + return strconv.Atoi(string(v)) + +} +func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) { + if i < 0 { + return list, fmt.Errorf("negative %d index not allowed", i) + } + stop := runeSet([]rune{'[', '.', '='}) + switch k, last, err := runesUntil(t.sc, stop); { + case len(k) > 0: + return list, fmt.Errorf("unexpected data at end of array index: %q", k) + case err != nil: + return list, err + case last == '=': + vl, e := t.valList() + switch e { + case nil: + return setIndex(list, i, vl) + case io.EOF: + return setIndex(list, i, "") + case ErrNotList: + rs, e := t.val() + if e != nil && e != io.EOF { + return list, e + } + v, e := t.runesToVal(rs) + if e != nil { + return nil, e + } + return setIndex(list, i, v) + default: + return list, e + } + case last == '[': + // now we have a nested list. Read the index and handle. + i, err := t.keyIndex() + if err != nil { + return list, fmt.Errorf("error parsing index: %s", err) + } + // Now we need to get the value after the ]. + list2, err := t.listItem(list, i) + if err != nil { + return nil, err + } + return setIndex(list, i, list2) + case last == '.': + // We have a nested object. Send to t.key + inner := map[string]interface{}{} + if len(list) > i { + var ok bool + inner, ok = list[i].(map[string]interface{}) + if !ok { + // We have indices out of order. Initialize empty value. + list[i] = map[string]interface{}{} + inner = list[i].(map[string]interface{}) + } + } + + // Recurse + e := t.key(inner) + if e != nil { + return list, e + } + return setIndex(list, i, inner) + default: + return nil, fmt.Errorf("parse error: unexpected token %v", last) + } +} + +func (t *parser) val() ([]rune, error) { + stop := runeSet([]rune{','}) + v, _, err := runesUntil(t.sc, stop) + return v, err +} + +func (t *parser) valList() ([]interface{}, error) { + r, _, e := t.sc.ReadRune() + if e != nil { + return []interface{}{}, e + } + + if r != '{' { + e = t.sc.UnreadRune() + if e != nil { + return []interface{}{}, e + } + return []interface{}{}, ErrNotList + } + + list := []interface{}{} + stop := runeSet([]rune{',', '}'}) + for { + switch rs, last, err := runesUntil(t.sc, stop); { + case err != nil: + if err == io.EOF { + err = errors.New("list must terminate with '}'") + } + return list, err + case last == '}': + // If this is followed by ',', consume it. 
+ if r, _, e := t.sc.ReadRune(); e == nil && r != ',' { + e = t.sc.UnreadRune() + if e != nil { + return []interface{}{}, e + } + } + v, e := t.runesToVal(rs) + list = append(list, v) + return list, e + case last == ',': + v, e := t.runesToVal(rs) + if e != nil { + return list, e + } + list = append(list, v) + } + } +} + +func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) { + var v []rune + for { + switch r, _, e := in.ReadRune(); { + case e != nil: + return v, r, e + case inMap(r, stop): + return v, r, nil + case r == '\\': + next, _, e := in.ReadRune() + if e != nil { + return v, next, e + } + v = append(v, next) + default: + v = append(v, r) + } + } +} + +func inMap(k rune, m map[rune]bool) bool { + _, ok := m[k] + return ok +} + +func typedVal(v []rune, st bool) interface{} { + val := string(v) + + if st { + return val + } + + if strings.EqualFold(val, "true") { + return true + } + + if strings.EqualFold(val, "false") { + return false + } + + if strings.EqualFold(val, "null") { + return struct{}{} + } + + if strings.EqualFold(val, "0") { + return int64(0) + } + + // If this value does not start with zero, try parsing it to an int + if len(val) != 0 && val[0] != '0' { + if iv, err := strconv.ParseInt(val, 10, 64); err == nil { + return iv + } + } + + return val +} diff --git a/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go b/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go new file mode 100644 index 00000000000..905c4863428 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go @@ -0,0 +1,40 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package extension + +import ( + "sync" +) + +var pluginMtx sync.Mutex +var bundleExtensions map[string]Handler + +// Handler is used to unmarshal a byte slice of a registered extension +// EXPERIMENTAL: Please don't rely on this functionality, it may go +// away or change in the future. +type Handler func([]byte, any) error + +// RegisterExtension registers a Handler for a certain file extension, including +// the dot: ".json", not "json". +// EXPERIMENTAL: Please don't rely on this functionality, it may go +// away or change in the future. +func RegisterExtension(name string, handler Handler) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + + if bundleExtensions == nil { + bundleExtensions = map[string]Handler{} + } + bundleExtensions[name] = handler +} + +// FindExtension ios used to look up a registered extension Handler +// EXPERIMENTAL: Please don't rely on this functionality, it may go +// away or change in the future. 
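A hedged sketch of how an embedder might use this registry, decoding with the ghodss/yaml package that is already vendored for strvals; the handler can later be fetched back with FindExtension(".yml"), defined below:

import (
	"github.com/ghodss/yaml"

	"github.com/open-policy-agent/opa/loader/extension"
)

func init() {
	// Register a decoder for the ".yml" extension.
	extension.RegisterExtension(".yml", func(bs []byte, v any) error {
		return yaml.Unmarshal(bs, v)
	})
}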
+func FindExtension(ext string) Handler { + pluginMtx.Lock() + defer pluginMtx.Unlock() + return bundleExtensions[ext] +} diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go index 1c2a04d06ba..8640d9c234f 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/loader.go +++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go @@ -8,6 +8,7 @@ package loader import ( "bytes" "fmt" + "io" "io/fs" "os" "path/filepath" @@ -91,6 +92,7 @@ type FileLoader interface { All(paths []string) (*Result, error) Filtered(paths []string, filter Filter) (*Result, error) AsBundle(path string) (*bundle.Bundle, error) + WithReader(io.Reader) FileLoader WithFS(fs.FS) FileLoader WithMetrics(metrics.Metrics) FileLoader WithFilter(Filter) FileLoader @@ -98,6 +100,7 @@ type FileLoader interface { WithSkipBundleVerification(bool) FileLoader WithProcessAnnotation(bool) FileLoader WithCapabilities(*ast.Capabilities) FileLoader + WithJSONOptions(*ast.JSONOptions) FileLoader } // NewFileLoader returns a new FileLoader instance. @@ -116,6 +119,7 @@ type fileLoader struct { files map[string]bundle.FileInfo opts ast.ParserOptions fsys fs.FS + reader io.Reader } // WithFS provides an fs.FS to use for loading files. You can pass nil to @@ -126,6 +130,14 @@ func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { return fl } +// WithReader provides an io.Reader to use for loading the bundle tarball. +// An io.Reader passed via WithReader takes precedence over an fs.FS passed +// via WithFS. +func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { + fl.reader = rdr + return fl +} + // WithMetrics provides the metrics instance to use while loading func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { fl.metrics = m @@ -162,6 +174,12 @@ func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { return fl } +// WithJSONOptions sets the JSONOptions for use when parsing files +func (fl *fileLoader) WithJSONOptions(opts *ast.JSONOptions) FileLoader { + fl.opts.JSONOptions = opts + return fl +} + // All returns a Result object loaded (recursively) from the specified paths. func (fl fileLoader) All(paths []string) (*Result, error) { return fl.Filtered(paths, nil) @@ -212,7 +230,14 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { if err != nil { return nil, err } - bundleLoader, isDir, err := GetBundleDirectoryLoaderWithFilter(path, fl.filter) + + var bundleLoader bundle.DirectoryLoader + var isDir bool + if fl.reader != nil { + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) + } else { + bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) + } if err != nil { return nil, err } @@ -222,7 +247,8 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { WithBundleVerificationConfig(fl.bvc). WithSkipBundleVerification(fl.skipVerify). WithProcessAnnotations(fl.opts.ProcessAnnotation). - WithCapabilities(fl.opts.Capabilities) + WithCapabilities(fl.opts.Capabilities). + WithJSONOptions(fl.opts.JSONOptions) // For bundle directories add the full path in front of module file names // to simplify debugging. @@ -239,55 +265,57 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { } // GetBundleDirectoryLoader returns a bundle directory loader which can be used to load -// files in the directory. 
+// files in the directory func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, false, err - } - - fi, err := os.Stat(path) - if err != nil { - return nil, false, fmt.Errorf("error reading %q: %s", path, err) - } - - var bundleLoader bundle.DirectoryLoader - - if fi.IsDir() { - bundleLoader = bundle.NewDirectoryLoader(path) - } else { - fh, err := os.Open(path) - if err != nil { - return nil, false, err - } - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) - } - return bundleLoader, fi.IsDir(), nil + return GetBundleDirectoryLoaderFS(nil, path, nil) } // GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load // files in the directory after applying the given filter. func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { + return GetBundleDirectoryLoaderFS(nil, path, filter) +} + +// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load +// files in the directory. +func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { path, err := fileurl.Clean(path) if err != nil { return nil, false, err } - fi, err := os.Stat(path) + var fi fs.FileInfo + if fsys != nil { + fi, err = fs.Stat(fsys, path) + } else { + fi, err = os.Stat(path) + } if err != nil { return nil, false, fmt.Errorf("error reading %q: %s", path, err) } var bundleLoader bundle.DirectoryLoader - if fi.IsDir() { - bundleLoader = bundle.NewDirectoryLoader(path).WithFilter(filter) + if fsys != nil { + bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) + } else { + bundleLoader = bundle.NewDirectoryLoader(path) + } } else { - fh, err := os.Open(path) + var fh fs.File + if fsys != nil { + fh, err = fsys.Open(path) + } else { + fh, err = os.Open(path) + } if err != nil { return nil, false, err } - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path).WithFilter(filter) + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) + } + + if filter != nil { + bundleLoader = bundleLoader.WithFilter(filter) } return bundleLoader, fi.IsDir(), nil } @@ -721,11 +749,10 @@ func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { m.Timer(metrics.RegoDataParse).Start() - buf := bytes.NewBuffer(bs) - decoder := util.NewJSONDecoder(buf) var x interface{} - err := decoder.Decode(&x) + err := util.UnmarshalJSON(bs, &x) m.Timer(metrics.RegoDataParse).Stop() + if err != nil { return nil, fmt.Errorf("%s: %w", path, err) } diff --git a/vendor/github.com/open-policy-agent/opa/logging/logging.go b/vendor/github.com/open-policy-agent/opa/logging/logging.go new file mode 100644 index 00000000000..3ce76da468b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/logging/logging.go @@ -0,0 +1,233 @@ +package logging + +import ( + "context" + "io" + + "github.com/sirupsen/logrus" +) + +// Level log level for Logger +type Level uint8 + +const ( + // Error error log level + Error Level = iota + // Warn warn log level + Warn + // Info info log level + Info + // Debug debug log level + Debug +) + +// Logger provides interface for OPA logger implementations +type Logger interface { + Debug(fmt string, a ...interface{}) + Info(fmt string, a ...interface{}) + Error(fmt string, a ...interface{}) + Warn(fmt string, a ...interface{}) + + 
WithFields(map[string]interface{}) Logger + + GetLevel() Level + SetLevel(Level) +} + +// StandardLogger is the default OPA logger implementation. +type StandardLogger struct { + logger *logrus.Logger + fields map[string]interface{} +} + +// New returns a new standard logger. +func New() *StandardLogger { + return &StandardLogger{ + logger: logrus.New(), + } +} + +// Get returns the standard logger used throughout OPA. +// +// Deprecated. Do not rely on the global logger. +func Get() *StandardLogger { + return &StandardLogger{ + logger: logrus.StandardLogger(), + } +} + +// SetOutput sets the underlying logrus output. +func (l *StandardLogger) SetOutput(w io.Writer) { + l.logger.SetOutput(w) +} + +// SetFormatter sets the underlying logrus formatter. +func (l *StandardLogger) SetFormatter(formatter logrus.Formatter) { + l.logger.SetFormatter(formatter) +} + +// WithFields provides additional fields to include in log output +func (l *StandardLogger) WithFields(fields map[string]interface{}) Logger { + cp := *l + cp.fields = make(map[string]interface{}) + for k, v := range l.fields { + cp.fields[k] = v + } + for k, v := range fields { + cp.fields[k] = v + } + return &cp +} + +// getFields returns additional fields of this logger +func (l *StandardLogger) getFields() map[string]interface{} { + return l.fields +} + +// SetLevel sets the standard logger level. +func (l *StandardLogger) SetLevel(level Level) { + var logrusLevel logrus.Level + switch level { + case Error: // set logging level report Warn or higher (includes Error) + logrusLevel = logrus.WarnLevel + case Warn: + logrusLevel = logrus.WarnLevel + case Info: + logrusLevel = logrus.InfoLevel + case Debug: + logrusLevel = logrus.DebugLevel + default: + l.Warn("unknown log level %v", level) + logrusLevel = logrus.InfoLevel + } + + l.logger.SetLevel(logrusLevel) +} + +// GetLevel returns the standard logger level. +func (l *StandardLogger) GetLevel() Level { + logrusLevel := l.logger.GetLevel() + + var level Level + switch logrusLevel { + case logrus.WarnLevel: + level = Error + case logrus.InfoLevel: + level = Info + case logrus.DebugLevel: + level = Debug + default: + l.Warn("unknown log level %v", logrusLevel) + level = Info + } + + return level +} + +// Debug logs at debug level +func (l *StandardLogger) Debug(fmt string, a ...interface{}) { + l.logger.WithFields(l.getFields()).Debugf(fmt, a...) +} + +// Info logs at info level +func (l *StandardLogger) Info(fmt string, a ...interface{}) { + l.logger.WithFields(l.getFields()).Infof(fmt, a...) +} + +// Error logs at error level +func (l *StandardLogger) Error(fmt string, a ...interface{}) { + l.logger.WithFields(l.getFields()).Errorf(fmt, a...) +} + +// Warn logs at warn level +func (l *StandardLogger) Warn(fmt string, a ...interface{}) { + l.logger.WithFields(l.getFields()).Warnf(fmt, a...) +} + +// NoOpLogger logging implementation that does nothing +type NoOpLogger struct { + level Level + fields map[string]interface{} +} + +// NewNoOpLogger instantiates new NoOpLogger +func NewNoOpLogger() *NoOpLogger { + return &NoOpLogger{ + level: Info, + } +} + +// WithFields provides additional fields to include in log output. +// Implemented here primarily to be able to switch between implementations without loss of data. 
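As a quick reference for the interface above, a minimal sketch of how the logrus-backed implementation is typically configured and used; the field names are illustrative:

logger := logging.New()
logger.SetLevel(logging.Debug)

bundleLog := logger.WithFields(map[string]interface{}{
	"plugin": "bundle",
	"name":   "authz",
})
bundleLog.Debug("activated %d bundles", 3)

if logger.GetLevel() == logging.Debug {
	// emit expensive diagnostics only when debugging
}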
+func (l *NoOpLogger) WithFields(fields map[string]interface{}) Logger { + cp := *l + cp.fields = fields + return &cp +} + +// Debug noop +func (*NoOpLogger) Debug(string, ...interface{}) {} + +// Info noop +func (*NoOpLogger) Info(string, ...interface{}) {} + +// Error noop +func (*NoOpLogger) Error(string, ...interface{}) {} + +// Warn noop +func (*NoOpLogger) Warn(string, ...interface{}) {} + +// SetLevel set log level +func (l *NoOpLogger) SetLevel(level Level) { + l.level = level +} + +// GetLevel get log level +func (l *NoOpLogger) GetLevel() Level { + return l.level +} + +type requestContextKey string + +const reqCtxKey = requestContextKey("request-context-key") + +// RequestContext represents the request context used to store data +// related to the request that could be used on logs. +type RequestContext struct { + ClientAddr string + ReqID uint64 + ReqMethod string + ReqPath string +} + +// Fields adapts the RequestContext fields to logrus.Fields. +func (rctx RequestContext) Fields() logrus.Fields { + return logrus.Fields{ + "client_addr": rctx.ClientAddr, + "req_id": rctx.ReqID, + "req_method": rctx.ReqMethod, + "req_path": rctx.ReqPath, + } +} + +// NewContext returns a copy of parent with an associated RequestContext. +func NewContext(parent context.Context, val *RequestContext) context.Context { + return context.WithValue(parent, reqCtxKey, val) +} + +// FromContext returns the RequestContext associated with ctx, if any. +func FromContext(ctx context.Context) (*RequestContext, bool) { + requestContext, ok := ctx.Value(reqCtxKey).(*RequestContext) + return requestContext, ok +} + +const decisionCtxKey = requestContextKey("decision_id") + +func WithDecisionID(parent context.Context, id string) context.Context { + return context.WithValue(parent, decisionCtxKey, id) +} + +func DecisionIDFromContext(ctx context.Context) (string, bool) { + s, ok := ctx.Value(decisionCtxKey).(string) + return s, ok +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/plugins/plugins.go new file mode 100644 index 00000000000..ef4d31ceebc --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/plugins.go @@ -0,0 +1,983 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package plugins implements plugin management for the policy engine. 
+package plugins + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/sdk/trace" + + "github.com/gorilla/mux" + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/bundle" + "github.com/open-policy-agent/opa/config" + "github.com/open-policy-agent/opa/hooks" + bundleUtils "github.com/open-policy-agent/opa/internal/bundle" + cfg "github.com/open-policy-agent/opa/internal/config" + "github.com/open-policy-agent/opa/internal/errors" + initload "github.com/open-policy-agent/opa/internal/runtime/init" + "github.com/open-policy-agent/opa/keys" + "github.com/open-policy-agent/opa/loader" + "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/plugins/rest" + "github.com/open-policy-agent/opa/resolver/wasm" + "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/topdown/cache" + "github.com/open-policy-agent/opa/topdown/print" + "github.com/open-policy-agent/opa/tracing" +) + +// Factory defines the interface OPA uses to instantiate your plugin. +// +// When OPA processes it's configuration it looks for factories that +// have been registered by calling runtime.RegisterPlugin. Factories +// are registered to a name which is used to key into the +// configuration blob. If your plugin has not been configured, your +// factory will not be invoked. +// +// plugins: +// my_plugin1: +// some_key: foo +// # my_plugin2: +// # some_key2: bar +// +// If OPA was started with the configuration above and received two +// calls to runtime.RegisterPlugins (one with NAME "my_plugin1" and +// one with NAME "my_plugin2"), it would only invoke the factory for +// for my_plugin1. +// +// OPA instantiates and reconfigures plugins in two steps. First, OPA +// will call Validate to check the configuration. Assuming the +// configuration is valid, your factory should return a configuration +// value that can be used to construct your plugin. Second, OPA will +// call New to instantiate your plugin providing the configuration +// value returned from the Validate call. +// +// Validate receives a slice of bytes representing plugin +// configuration and returns a configuration value that can be used to +// instantiate your plugin. The manager is provided to give access to +// the OPA's compiler, storage layer, and global configuration. Your +// Validate function will typically: +// +// 1. Deserialize the raw config bytes +// 2. Validate the deserialized config for semantic errors +// 3. Inject default values +// 4. Return a deserialized/parsed config +// +// New receives a valid configuration for your plugin and returns a +// plugin object. Your New function will typically: +// +// 1. Cast the config value to it's own type +// 2. Instantiate a plugin object +// 3. Return the plugin object +// 4. Update status via `plugins.Manager#UpdatePluginStatus` +// +// After a plugin has been created subsequent status updates can be +// send anytime the plugin enters a ready or error state. +type Factory interface { + Validate(manager *Manager, config []byte) (interface{}, error) + New(manager *Manager, config interface{}) Plugin +} + +// Plugin defines the interface OPA uses to manage your plugin. +// +// When OPA starts it will start all of the plugins it was configured +// to instantiate. Each time a new plugin is configured (via +// discovery), OPA will start it. You can use the Start call to spawn +// additional goroutines or perform initialization tasks. 
+// +// Currently OPA will not call Stop on plugins. +// +// When OPA receives new configuration for your plugin via discovery +// it will first Validate the configuration using your factory and +// then call Reconfigure. +type Plugin interface { + Start(ctx context.Context) error + Stop(ctx context.Context) + Reconfigure(ctx context.Context, config interface{}) +} + +// Triggerable defines the interface plugins use for manual plugin triggers. +type Triggerable interface { + Trigger(context.Context) error +} + +// State defines the state that a Plugin instance is currently +// in with pre-defined states. +type State string + +const ( + // StateNotReady indicates that the Plugin is not in an error state, but isn't + // ready for normal operation yet. This should only happen at + // initialization time. + StateNotReady State = "NOT_READY" + + // StateOK signifies that the Plugin is operating normally. + StateOK State = "OK" + + // StateErr indicates that the Plugin is in an error state and should not + // be considered as functional. + StateErr State = "ERROR" + + // StateWarn indicates the Plugin is operating, but in a potentially dangerous or + // degraded state. It may be used to indicate manual remediation is needed, or to + // alert admins of some other noteworthy state. + StateWarn State = "WARN" +) + +// TriggerMode defines the trigger mode utilized by a Plugin for bundle download, +// log upload etc. +type TriggerMode string + +const ( + // TriggerPeriodic represents periodic polling mechanism + TriggerPeriodic TriggerMode = "periodic" + + // TriggerManual represents manual triggering mechanism + TriggerManual TriggerMode = "manual" + + // DefaultTriggerMode represents default trigger mechanism + DefaultTriggerMode TriggerMode = "periodic" +) + +// Status has a Plugin's current status plus an optional Message. +type Status struct { + State State `json:"state"` + Message string `json:"message,omitempty"` +} + +func (s *Status) String() string { + return fmt.Sprintf("{%v %q}", s.State, s.Message) +} + +// StatusListener defines a handler to register for status updates. +type StatusListener func(status map[string]*Status) + +// Manager implements lifecycle management of plugins and gives plugins access +// to engine-wide components like storage. 
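To make the Factory/Plugin contract above concrete, a hedged sketch of a do-nothing plugin; Config, factory and noopPlugin are illustrative names rather than part of this package, and the raw config blob is decoded with encoding/json:

type Config struct {
	SomeKey string `json:"some_key"`
}

type factory struct{}

// Validate deserializes and sanity-checks the raw config blob.
func (factory) Validate(_ *Manager, raw []byte) (interface{}, error) {
	var c Config
	if err := json.Unmarshal(raw, &c); err != nil {
		return nil, err
	}
	return c, nil
}

// New builds the plugin and reports its initial status.
func (factory) New(m *Manager, config interface{}) Plugin {
	m.UpdatePluginStatus("my_plugin1", &Status{State: StateNotReady})
	return &noopPlugin{manager: m, config: config.(Config)}
}

type noopPlugin struct {
	manager *Manager
	config  Config
}

func (p *noopPlugin) Start(ctx context.Context) error {
	p.manager.UpdatePluginStatus("my_plugin1", &Status{State: StateOK})
	return nil
}

func (p *noopPlugin) Stop(ctx context.Context) {
	p.manager.UpdatePluginStatus("my_plugin1", &Status{State: StateNotReady})
}

func (p *noopPlugin) Reconfigure(ctx context.Context, config interface{}) {
	p.config = config.(Config)
}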
+type Manager struct { + Store storage.Store + Config *config.Config + Info *ast.Term + ID string + + compiler *ast.Compiler + compilerMux sync.RWMutex + wasmResolvers []*wasm.Resolver + wasmResolversMtx sync.RWMutex + services map[string]rest.Client + keys map[string]*keys.Config + plugins []namedplugin + registeredTriggers []func(storage.Transaction) + mtx sync.Mutex + pluginStatus map[string]*Status + pluginStatusListeners map[string]StatusListener + initBundles map[string]*bundle.Bundle + initFiles loader.Result + maxErrors int + initialized bool + interQueryBuiltinCacheConfig *cache.Config + gracefulShutdownPeriod int + registeredCacheTriggers []func(*cache.Config) + logger logging.Logger + consoleLogger logging.Logger + serverInitialized chan struct{} + serverInitializedOnce sync.Once + printHook print.Hook + enablePrintStatements bool + router *mux.Router + prometheusRegister prometheus.Registerer + tracerProvider *trace.TracerProvider + distributedTacingOpts tracing.Options + registeredNDCacheTriggers []func(bool) + bootstrapConfigLabels map[string]string + hooks hooks.Hooks +} + +type managerContextKey string +type managerWasmResolverKey string + +const managerCompilerContextKey = managerContextKey("compiler") +const managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers") + +// SetCompilerOnContext puts the compiler into the storage context. Calling this +// function before committing updated policies to storage allows the manager to +// skip parsing and compiling of modules. Instead, the manager will use the +// compiler that was stored on the context. +func SetCompilerOnContext(context *storage.Context, compiler *ast.Compiler) { + context.Put(managerCompilerContextKey, compiler) +} + +// GetCompilerOnContext gets the compiler cached on the storage context. +func GetCompilerOnContext(context *storage.Context) *ast.Compiler { + compiler, ok := context.Get(managerCompilerContextKey).(*ast.Compiler) + if !ok { + return nil + } + return compiler +} + +// SetWasmResolversOnContext puts a set of Wasm Resolvers into the storage +// context. Calling this function before committing updated wasm modules to +// storage allows the manager to skip initializing modules before using them. +// Instead, the manager will use the compiler that was stored on the context. +func SetWasmResolversOnContext(context *storage.Context, rs []*wasm.Resolver) { + context.Put(managerWasmResolverContextKey, rs) +} + +// getWasmResolversOnContext gets the resolvers cached on the storage context. 
+func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver { + resolvers, ok := context.Get(managerWasmResolverContextKey).([]*wasm.Resolver) + if !ok { + return nil + } + return resolvers +} + +func validateTriggerMode(mode TriggerMode) error { + switch mode { + case TriggerPeriodic, TriggerManual: + return nil + default: + return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual) + } +} + +// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values +func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) { + + if a == nil && b != nil { + err := validateTriggerMode(*b) + if err != nil { + return nil, err + } + return b, nil + } else if a != nil && b == nil { + err := validateTriggerMode(*a) + if err != nil { + return nil, err + } + return a, nil + } else if a != nil && b != nil { + if *a != *b { + return nil, fmt.Errorf("trigger mode mismatch: %s and %s (hint: check discovery configuration)", *a, *b) + } + err := validateTriggerMode(*a) + if err != nil { + return nil, err + } + return a, nil + + } else { + t := DefaultTriggerMode + return &t, nil + } +} + +type namedplugin struct { + name string + plugin Plugin +} + +// Info sets the runtime information on the manager. The runtime information is +// propagated to opa.runtime() built-in function calls. +func Info(term *ast.Term) func(*Manager) { + return func(m *Manager) { + m.Info = term + } +} + +// InitBundles provides the initial set of bundles to load. +func InitBundles(b map[string]*bundle.Bundle) func(*Manager) { + return func(m *Manager) { + m.initBundles = b + } +} + +// InitFiles provides the initial set of other data/policy files to load. +func InitFiles(f loader.Result) func(*Manager) { + return func(m *Manager) { + m.initFiles = f + } +} + +// MaxErrors sets the error limit for the manager's shared compiler. +func MaxErrors(n int) func(*Manager) { + return func(m *Manager) { + m.maxErrors = n + } +} + +// GracefulShutdownPeriod passes the configured graceful shutdown period to plugins +func GracefulShutdownPeriod(gracefulShutdownPeriod int) func(*Manager) { + return func(m *Manager) { + m.gracefulShutdownPeriod = gracefulShutdownPeriod + } +} + +// Logger configures the passed logger on the plugin manager (useful to +// configure default fields) +func Logger(logger logging.Logger) func(*Manager) { + return func(m *Manager) { + m.logger = logger + } +} + +// ConsoleLogger sets the passed logger to be used by plugins that are +// configured with console logging enabled. 
+func ConsoleLogger(logger logging.Logger) func(*Manager) { + return func(m *Manager) { + m.consoleLogger = logger + } +} + +func EnablePrintStatements(yes bool) func(*Manager) { + return func(m *Manager) { + m.enablePrintStatements = yes + } +} + +func PrintHook(h print.Hook) func(*Manager) { + return func(m *Manager) { + m.printHook = h + } +} + +func WithRouter(r *mux.Router) func(*Manager) { + return func(m *Manager) { + m.router = r + } +} + +// WithPrometheusRegister sets the passed prometheus.Registerer to be used by plugins +func WithPrometheusRegister(prometheusRegister prometheus.Registerer) func(*Manager) { + return func(m *Manager) { + m.prometheusRegister = prometheusRegister + } +} + +// WithTracerProvider sets the passed *trace.TracerProvider to be used by plugins +func WithTracerProvider(tracerProvider *trace.TracerProvider) func(*Manager) { + return func(m *Manager) { + m.tracerProvider = tracerProvider + } +} + +// WithDistributedTracingOpts sets the options to be used by distributed tracing. +func WithDistributedTracingOpts(tr tracing.Options) func(*Manager) { + return func(m *Manager) { + m.distributedTacingOpts = tr + } +} + +// WithHooks allows passing hooks to the plugin manager. +func WithHooks(hs hooks.Hooks) func(*Manager) { + return func(m *Manager) { + m.hooks = hs + } +} + +// New creates a new Manager using config. +func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) { + + parsedConfig, err := config.ParseConfig(raw, id) + if err != nil { + return nil, err + } + + m := &Manager{ + Store: store, + Config: parsedConfig, + ID: id, + pluginStatus: map[string]*Status{}, + pluginStatusListeners: map[string]StatusListener{}, + maxErrors: -1, + serverInitialized: make(chan struct{}), + bootstrapConfigLabels: parsedConfig.Labels, + } + + for _, f := range opts { + f(m) + } + + if m.logger == nil { + m.logger = logging.Get() + } + + if m.consoleLogger == nil { + m.consoleLogger = logging.New() + } + + m.hooks.Each(func(h hooks.Hook) { + if f, ok := h.(hooks.ConfigHook); ok { + if c, e := f.OnConfig(context.Background(), parsedConfig); e != nil { + err = errors.Join(err, e) + } else { + parsedConfig = c + } + } + }) + if err != nil { + return nil, err + } + + // do after options and overrides + m.keys, err = keys.ParseKeysConfig(parsedConfig.Keys) + if err != nil { + return nil, err + } + + m.interQueryBuiltinCacheConfig, err = cache.ParseCachingConfig(parsedConfig.Caching) + if err != nil { + return nil, err + } + + serviceOpts := cfg.ServiceOptions{ + Raw: parsedConfig.Services, + AuthPlugin: m.AuthPlugin, + Keys: m.keys, + Logger: m.logger, + DistributedTacingOpts: m.distributedTacingOpts, + } + + m.services, err = cfg.ParseServicesConfig(serviceOpts) + if err != nil { + return nil, err + } + + return m, nil +} + +// Init returns an error if the manager could not initialize itself. Init() should +// be called before Start(). Init() is idempotent. 
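Taken together, the constructor and the option functions above are typically used like this by an embedding application; configBytes and store are assumed to come from the host, and Start and Stop appear further down:

m, err := New(configBytes, "instance-1", store,
	Logger(logging.New()),
	MaxErrors(-1),
	GracefulShutdownPeriod(10), // seconds
)
if err != nil {
	return err
}

if err := m.Init(ctx); err != nil { // idempotent; Start calls it if skipped
	return err
}
if err := m.Start(ctx); err != nil {
	return err
}
defer m.Stop(ctx)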
+func (m *Manager) Init(ctx context.Context) error { + + if m.initialized { + return nil + } + + params := storage.TransactionParams{ + Write: true, + Context: storage.NewContext(), + } + + err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error { + + result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{ + Store: m.Store, + Txn: txn, + Files: m.initFiles, + Bundles: m.initBundles, + MaxErrors: m.maxErrors, + EnablePrintStatements: m.enablePrintStatements, + }) + + if err != nil { + return err + } + + SetCompilerOnContext(params.Context, result.Compiler) + + resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) + if err != nil { + return err + } + SetWasmResolversOnContext(params.Context, resolvers) + + _, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit}) + return err + }) + + if err != nil { + return err + } + + m.initialized = true + return nil +} + +// Labels returns the set of labels from the configuration. +func (m *Manager) Labels() map[string]string { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.Config.Labels +} + +// InterQueryBuiltinCacheConfig returns the configuration for the inter-query cache. +func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.interQueryBuiltinCacheConfig +} + +// Register adds a plugin to the manager. When the manager is started, all of +// the plugins will be started. +func (m *Manager) Register(name string, plugin Plugin) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.plugins = append(m.plugins, namedplugin{ + name: name, + plugin: plugin, + }) + if _, ok := m.pluginStatus[name]; !ok { + m.pluginStatus[name] = &Status{State: StateNotReady} + } +} + +// Plugins returns the list of plugins registered with the manager. +func (m *Manager) Plugins() []string { + m.mtx.Lock() + defer m.mtx.Unlock() + result := make([]string, len(m.plugins)) + for i := range m.plugins { + result[i] = m.plugins[i].name + } + return result +} + +// Plugin returns the plugin registered with name or nil if name is not found. +func (m *Manager) Plugin(name string) Plugin { + m.mtx.Lock() + defer m.mtx.Unlock() + for i := range m.plugins { + if m.plugins[i].name == name { + return m.plugins[i].plugin + } + } + return nil +} + +// AuthPlugin returns the HTTPAuthPlugin registered with name or nil if name is not found. +func (m *Manager) AuthPlugin(name string) rest.HTTPAuthPlugin { + m.mtx.Lock() + defer m.mtx.Unlock() + for i := range m.plugins { + if m.plugins[i].name == name { + return m.plugins[i].plugin.(rest.HTTPAuthPlugin) + } + } + return nil +} + +// GetCompiler returns the manager's compiler. +func (m *Manager) GetCompiler() *ast.Compiler { + m.compilerMux.RLock() + defer m.compilerMux.RUnlock() + return m.compiler +} + +func (m *Manager) setCompiler(compiler *ast.Compiler) { + m.compilerMux.Lock() + defer m.compilerMux.Unlock() + m.compiler = compiler +} + +// GetRouter returns the managers router if set +func (m *Manager) GetRouter() *mux.Router { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.router +} + +// RegisterCompilerTrigger registers for change notifications when the compiler +// is changed. +func (m *Manager) RegisterCompilerTrigger(f func(storage.Transaction)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredTriggers = append(m.registeredTriggers, f) +} + +// GetWasmResolvers returns the manager's set of Wasm Resolvers. 
+func (m *Manager) GetWasmResolvers() []*wasm.Resolver { + m.wasmResolversMtx.RLock() + defer m.wasmResolversMtx.RUnlock() + return m.wasmResolvers +} + +func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) { + m.wasmResolversMtx.Lock() + defer m.wasmResolversMtx.Unlock() + m.wasmResolvers = rs +} + +// Start starts the manager. Init() should be called once before Start(). +func (m *Manager) Start(ctx context.Context) error { + + if m == nil { + return nil + } + + if !m.initialized { + if err := m.Init(ctx); err != nil { + return err + } + } + + var toStart []Plugin + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + toStart = make([]Plugin, len(m.plugins)) + for i := range m.plugins { + toStart[i] = m.plugins[i].plugin + } + }() + + for i := range toStart { + if err := toStart[i].Start(ctx); err != nil { + return err + } + } + + return nil +} + +// Stop stops the manager, stopping all the plugins registered with it. +// Any plugin that needs to perform cleanup should do so within the duration +// of the graceful shutdown period passed with the context as a timeout. +// Note that a graceful shutdown period configured with the Manager instance +// will override the timeout of the passed in context (if applicable). +func (m *Manager) Stop(ctx context.Context) { + var toStop []Plugin + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + toStop = make([]Plugin, len(m.plugins)) + for i := range m.plugins { + toStop[i] = m.plugins[i].plugin + } + }() + + var cancel context.CancelFunc + if m.gracefulShutdownPeriod > 0 { + ctx, cancel = context.WithTimeout(ctx, time.Duration(m.gracefulShutdownPeriod)*time.Second) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + for i := range toStop { + toStop[i].Stop(ctx) + } + if c, ok := m.Store.(interface{ Close(context.Context) error }); ok { + if err := c.Close(ctx); err != nil { + m.logger.Error("Error closing store: %v", err) + } + } +} + +// Reconfigure updates the configuration on the manager. +func (m *Manager) Reconfigure(config *config.Config) error { + opts := cfg.ServiceOptions{ + Raw: config.Services, + AuthPlugin: m.AuthPlugin, + Logger: m.logger, + DistributedTacingOpts: m.distributedTacingOpts, + } + + keys, err := keys.ParseKeysConfig(config.Keys) + if err != nil { + return err + } + opts.Keys = keys + + services, err := cfg.ParseServicesConfig(opts) + if err != nil { + return err + } + + interQueryBuiltinCacheConfig, err := cache.ParseCachingConfig(config.Caching) + if err != nil { + return err + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + // don't overwrite existing labels, only allow additions - always based on the boostrap config + if config.Labels == nil { + config.Labels = m.bootstrapConfigLabels + } else { + for label, value := range m.bootstrapConfigLabels { + config.Labels[label] = value + } + } + + // don't erase persistence directory + if config.PersistenceDirectory == nil { + config.PersistenceDirectory = m.Config.PersistenceDirectory + } + + m.Config = config + m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig + for name, client := range services { + m.services[name] = client + } + + for name, key := range keys { + m.keys[name] = key + } + + for _, trigger := range m.registeredCacheTriggers { + trigger(interQueryBuiltinCacheConfig) + } + + for _, trigger := range m.registeredNDCacheTriggers { + trigger(config.NDBuiltinCache) + } + + return nil +} + +// PluginStatus returns the current statuses of any plugins registered. 
+func (m *Manager) PluginStatus() map[string]*Status { + m.mtx.Lock() + defer m.mtx.Unlock() + + return m.copyPluginStatus() +} + +// RegisterPluginStatusListener registers a StatusListener to be +// called when plugin status updates occur. +func (m *Manager) RegisterPluginStatusListener(name string, listener StatusListener) { + m.mtx.Lock() + defer m.mtx.Unlock() + + m.pluginStatusListeners[name] = listener +} + +// UnregisterPluginStatusListener removes a StatusListener registered with the +// same name. +func (m *Manager) UnregisterPluginStatusListener(name string) { + m.mtx.Lock() + defer m.mtx.Unlock() + + delete(m.pluginStatusListeners, name) +} + +// UpdatePluginStatus updates a named plugins status. Any registered +// listeners will be called with a copy of the new state of all +// plugins. +func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) { + + var toNotify map[string]StatusListener + var statuses map[string]*Status + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + m.pluginStatus[pluginName] = status + toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners)) + for k, v := range m.pluginStatusListeners { + toNotify[k] = v + } + statuses = m.copyPluginStatus() + }() + + for _, l := range toNotify { + l(statuses) + } +} + +func (m *Manager) copyPluginStatus() map[string]*Status { + statusCpy := map[string]*Status{} + for k, v := range m.pluginStatus { + var cpy *Status + if v != nil { + cpy = &Status{ + State: v.State, + Message: v.Message, + } + } + statusCpy[k] = cpy + } + return statusCpy +} + +func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { + + compiler := GetCompilerOnContext(event.Context) + + // If the context does not contain the compiler fallback to loading the + // compiler from the store. Currently the bundle plugin sets the + // compiler on the context but the server does not (nor would users + // implementing their own policy loading.) + if compiler == nil && event.PolicyChanged() { + compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements) + } + + if compiler != nil { + m.setCompiler(compiler) + for _, f := range m.registeredTriggers { + f(txn) + } + } + + // Similar to the compiler, look for a set of resolvers on the transaction + // context. If they are not set we may need to reload from the store. 
+ resolvers := getWasmResolversOnContext(event.Context) + if resolvers != nil { + m.setWasmResolvers(resolvers) + + } else if event.DataChanged() { + if requiresWasmResolverReload(event) { + resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) + if err != nil { + panic(err) + } + m.setWasmResolvers(resolvers) + } else { + err := m.updateWasmResolversData(ctx, event) + if err != nil { + panic(err) + } + } + } +} + +func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool) (*ast.Compiler, error) { + policies, err := store.ListPolicies(ctx, txn) + if err != nil { + return nil, err + } + modules := map[string]*ast.Module{} + + for _, policy := range policies { + bs, err := store.GetPolicy(ctx, txn, policy) + if err != nil { + return nil, err + } + module, err := ast.ParseModule(policy, string(bs)) + if err != nil { + return nil, err + } + modules[policy] = module + } + + compiler := ast.NewCompiler().WithEnablePrintStatements(enablePrintStatements) + compiler.Compile(modules) + return compiler, nil +} + +func requiresWasmResolverReload(event storage.TriggerEvent) bool { + // If the data changes touched the bundle path (which includes + // the wasm modules) we will reload them. Otherwise update + // data for each module already on the manager. + for _, dataEvent := range event.Data { + if dataEvent.Path.HasPrefix(bundle.BundlesBasePath) { + return true + } + } + return false +} + +func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.TriggerEvent) error { + m.wasmResolversMtx.Lock() + defer m.wasmResolversMtx.Unlock() + + for _, resolver := range m.wasmResolvers { + for _, dataEvent := range event.Data { + var err error + if dataEvent.Removed { + err = resolver.RemoveDataPath(ctx, dataEvent.Path) + } else { + err = resolver.SetDataPath(ctx, dataEvent.Path, dataEvent.Data) + } + if err != nil { + return fmt.Errorf("failed to update wasm runtime data: %s", err) + } + } + } + return nil +} + +// PublicKeys returns a public keys that can be used for verifying signed bundles. +func (m *Manager) PublicKeys() map[string]*keys.Config { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.keys +} + +// Client returns a client for communicating with a remote service. +func (m *Manager) Client(name string) rest.Client { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.services[name] +} + +// Services returns a list of services that m can provide clients for. +func (m *Manager) Services() []string { + m.mtx.Lock() + defer m.mtx.Unlock() + s := make([]string, 0, len(m.services)) + for name := range m.services { + s = append(s, name) + } + return s +} + +// Logger gets the standard logger for this plugin manager. +func (m *Manager) Logger() logging.Logger { + return m.logger +} + +// ConsoleLogger gets the console logger for this plugin manager. +func (m *Manager) ConsoleLogger() logging.Logger { + return m.consoleLogger +} + +func (m *Manager) PrintHook() print.Hook { + return m.printHook +} + +func (m *Manager) EnablePrintStatements() bool { + return m.enablePrintStatements +} + +// ServerInitialized signals a channel indicating that the OPA +// server has finished initialization. +func (m *Manager) ServerInitialized() { + m.serverInitializedOnce.Do(func() { close(m.serverInitialized) }) +} + +// ServerInitializedChannel returns a receive-only channel that +// is closed when the OPA server has finished initialization. 
+// Be aware that the socket of the server listener may not be +// open by the time this channel is closed. There is a very +// small window where the socket may still be closed, due to +// a race condition. +func (m *Manager) ServerInitializedChannel() <-chan struct{} { + return m.serverInitialized +} + +// RegisterCacheTrigger accepts a func that receives new inter-query cache config generated by +// a reconfigure of the plugin manager, so that it can be propagated to existing inter-query caches. +func (m *Manager) RegisterCacheTrigger(trigger func(*cache.Config)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredCacheTriggers = append(m.registeredCacheTriggers, trigger) +} + +// PrometheusRegister gets the prometheus.Registerer for this plugin manager. +func (m *Manager) PrometheusRegister() prometheus.Registerer { + return m.prometheusRegister +} + +// TracerProvider gets the *trace.TracerProvider for this plugin manager. +func (m *Manager) TracerProvider() *trace.TracerProvider { + return m.tracerProvider +} + +func (m *Manager) RegisterNDCacheTrigger(trigger func(bool)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredNDCacheTriggers = append(m.registeredNDCacheTriggers, trigger) +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go new file mode 100644 index 00000000000..dedbcb8d1b4 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go @@ -0,0 +1,693 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package rest + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/open-policy-agent/opa/internal/jwx/jwa" + "github.com/open-policy-agent/opa/internal/jwx/jws" + "github.com/open-policy-agent/opa/internal/jwx/jws/sign" + "github.com/open-policy-agent/opa/internal/providers/aws" + "github.com/open-policy-agent/opa/internal/uuid" + "github.com/open-policy-agent/opa/keys" + "github.com/open-policy-agent/opa/logging" +) + +const ( + // Default to s3 when the service for sigv4 signing is not specified for backwards compatibility + awsSigv4SigningDefaultService = "s3" +) + +// DefaultTLSConfig defines standard TLS configurations based on the Config +func DefaultTLSConfig(c Config) (*tls.Config, error) { + t := &tls.Config{} + url, err := url.Parse(c.URL) + if err != nil { + return nil, err + } + if url.Scheme == "https" { + t.InsecureSkipVerify = c.AllowInsecureTLS + } + + if c.TLS != nil && c.TLS.CACert != "" { + caCert, err := os.ReadFile(c.TLS.CACert) + if err != nil { + return nil, err + } + + var rootCAs *x509.CertPool + if c.TLS.SystemCARequired { + rootCAs, err = x509.SystemCertPool() + if err != nil { + return nil, err + } + } else { + rootCAs = x509.NewCertPool() + } + + ok := rootCAs.AppendCertsFromPEM(caCert) + if !ok { + return nil, errors.New("unable to parse and append CA certificate to certificate pool") + } + t.RootCAs = rootCAs + } + + return t, nil +} + +// DefaultRoundTripperClient is a reasonable set of defaults for HTTP auth plugins +func DefaultRoundTripperClient(t *tls.Config, timeout int64) *http.Client { + // Ensure we use a http.Transport with proper settings: the zero values are not + // a good choice, as they cause leaking 
connections: + // https://github.com/golang/go/issues/19620 + + // copy, we don't want to alter the default client's Transport + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.ResponseHeaderTimeout = time.Duration(timeout) * time.Second + tr.TLSClientConfig = t + + c := *http.DefaultClient + c.Transport = tr + return &c +} + +// defaultAuthPlugin represents baseline 'no auth' behavior if no alternative plugin is specified for a service +type defaultAuthPlugin struct{} + +func (*defaultAuthPlugin) NewClient(c Config) (*http.Client, error) { + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (*defaultAuthPlugin) Prepare(*http.Request) error { + return nil +} + +type serverTLSConfig struct { + CACert string `json:"ca_cert,omitempty"` + SystemCARequired bool `json:"system_ca_required,omitempty"` +} + +// bearerAuthPlugin represents authentication via a bearer token in the HTTP Authorization header +type bearerAuthPlugin struct { + Token string `json:"token"` + TokenPath string `json:"token_path"` + Scheme string `json:"scheme,omitempty"` + + // encode is set to true for the OCIDownloader because + // it expects tokens in plain text but needs them in base64. + encode bool +} + +func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) { + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + if ap.Token != "" && ap.TokenPath != "" { + return nil, errors.New("invalid config: specify a value for either the \"token\" or \"token_path\" field") + } + + if ap.Scheme == "" { + ap.Scheme = "Bearer" + } + + if c.Type == "oci" { + // Standard rest clients use the bearer token as it is defined in the Config + // but the OCIDownloader needs it encoded to base64 before using to sign a request. 
+ ap.encode = true + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (ap *bearerAuthPlugin) Prepare(req *http.Request) error { + token := ap.Token + + if ap.TokenPath != "" { + bytes, err := os.ReadFile(ap.TokenPath) + if err != nil { + return err + } + token = strings.TrimSpace(string(bytes)) + } + + if ap.encode { + token = base64.StdEncoding.EncodeToString([]byte(token)) + } + + req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token)) + return nil +} + +type tokenEndpointResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` +} + +// oauth2ClientCredentialsAuthPlugin represents authentication via a bearer token in the HTTP Authorization header +// obtained through the OAuth2 client credentials flow +type oauth2ClientCredentialsAuthPlugin struct { + GrantType string `json:"grant_type"` + TokenURL string `json:"token_url"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + SigningKeyID string `json:"signing_key"` + Thumbprint string `json:"thumbprint"` + Claims map[string]interface{} `json:"additional_claims"` + IncludeJti bool `json:"include_jti_claim"` + Scopes []string `json:"scopes,omitempty"` + AdditionalHeaders map[string]string `json:"additional_headers,omitempty"` + AdditionalParameters map[string]string `json:"additional_parameters,omitempty"` + + signingKey *keys.Config + signingKeyParsed interface{} + tokenCache *oauth2Token + tlsSkipVerify bool + logger logging.Logger +} + +type oauth2Token struct { + Token string + ExpiresAt time.Time +} + +func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(claims map[string]interface{}, signingKey interface{}) (*string, error) { + now := time.Now() + baseClaims := map[string]interface{}{ + "iat": now.Unix(), + "exp": now.Add(10 * time.Minute).Unix(), + } + if claims == nil { + claims = make(map[string]interface{}) + } + for k, v := range baseClaims { + claims[k] = v + } + + if len(ap.Scopes) > 0 { + claims["scope"] = strings.Join(ap.Scopes, " ") + } + + if ap.IncludeJti { + jti, err := uuid.New(rand.Reader) + if err != nil { + return nil, err + } + claims["jti"] = jti + } + + payload, err := json.Marshal(claims) + if err != nil { + return nil, err + } + + var jwsHeaders []byte + if ap.Thumbprint != "" { + bytes, err := hex.DecodeString(ap.Thumbprint) + if err != nil { + return nil, err + } + x5t := base64.URLEncoding.EncodeToString(bytes) + jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s","x5t":"%s"}`, ap.signingKey.Algorithm, x5t)) + } else { + jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s"}`, ap.signingKey.Algorithm)) + } + + jwsCompact, err := jws.SignLiteral(payload, + jwa.SignatureAlgorithm(ap.signingKey.Algorithm), + signingKey, + jwsHeaders, + rand.Reader) + if err != nil { + return nil, err + } + jwt := string(jwsCompact) + + return &jwt, nil +} + +func (ap *oauth2ClientCredentialsAuthPlugin) parseSigningKey(c Config) (err error) { + if ap.SigningKeyID == "" { + return errors.New("signing_key required for jwt_bearer grant type") + } + + if val, ok := c.keys[ap.SigningKeyID]; ok { + if val.PrivateKey == "" { + return errors.New("referenced signing_key does not include a private key") + } + ap.signingKey = val + } else { + return errors.New("signing_key refers to non-existent key") + } + + alg := jwa.SignatureAlgorithm(ap.signingKey.Algorithm) + ap.signingKeyParsed, err = sign.GetSigningKey(ap.signingKey.PrivateKey, alg) + if err != nil { 
+ return err + } + + return nil +} + +func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client, error) { + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + if ap.GrantType == "" { + // Use client_credentials as default to not break existing config + ap.GrantType = grantTypeClientCredentials + } else if ap.GrantType != grantTypeClientCredentials && ap.GrantType != grantTypeJwtBearer { + return nil, errors.New("grant_type must be either client_credentials or jwt_bearer") + } + + if ap.GrantType == grantTypeJwtBearer || (ap.GrantType == grantTypeClientCredentials && ap.SigningKeyID != "") { + if err = ap.parseSigningKey(c); err != nil { + return nil, err + } + } + + // Inherit skip verify from the "parent" settings. Should this be configurable on the credentials too? + ap.tlsSkipVerify = c.AllowInsecureTLS + + ap.logger = c.logger + + if !strings.HasPrefix(ap.TokenURL, "https://") { + return nil, errors.New("token_url required to use https scheme") + } + if ap.GrantType == grantTypeClientCredentials { + if ap.ClientSecret != "" && ap.SigningKeyID != "" { + return nil, errors.New("can only use one of client_secret and signing_key for client_credentials") + } + if ap.SigningKeyID == "" && (ap.ClientID == "" || ap.ClientSecret == "") { + return nil, errors.New("client_id and client_secret required") + } + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +// requestToken tries to obtain an access token using either the client credentials flow +// https://tools.ietf.org/html/rfc6749#section-4.4 +// or the JWT authorization grant +// https://tools.ietf.org/html/rfc7523 +func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (*oauth2Token, error) { + body := url.Values{} + if ap.GrantType == grantTypeJwtBearer { + authJwt, err := ap.createAuthJWT(ap.Claims, ap.signingKeyParsed) + if err != nil { + return nil, err + } + body.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer") + body.Add("assertion", *authJwt) + } else { + body.Add("grant_type", grantTypeClientCredentials) + + if ap.SigningKeyID != "" { + authJwt, err := ap.createAuthJWT(ap.Claims, ap.signingKeyParsed) + if err != nil { + return nil, err + } + body.Add("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + body.Add("client_assertion", *authJwt) + + if ap.ClientID != "" { + body.Add("client_id", ap.ClientID) + } + } + } + + if len(ap.Scopes) > 0 { + body.Add("scope", strings.Join(ap.Scopes, " ")) + } + + for k, v := range ap.AdditionalParameters { + body.Set(k, v) + } + + r, err := http.NewRequestWithContext(ctx, "POST", ap.TokenURL, strings.NewReader(body.Encode())) + if err != nil { + return nil, err + } + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + if ap.GrantType == grantTypeClientCredentials && ap.ClientSecret != "" { + r.SetBasicAuth(ap.ClientID, ap.ClientSecret) + } + + for k, v := range ap.AdditionalHeaders { + r.Header.Add(k, v) + } + + client := DefaultRoundTripperClient(&tls.Config{InsecureSkipVerify: ap.tlsSkipVerify}, 10) + response, err := client.Do(r) + if err != nil { + return nil, err + } + + bodyRaw, err := io.ReadAll(response.Body) + if err != nil { + return nil, err + } + + if response.StatusCode != 200 { + return nil, fmt.Errorf("error in response from OAuth2 token endpoint: %v", string(bodyRaw)) + } + + var tokenResponse tokenEndpointResponse + err = json.Unmarshal(bodyRaw, &tokenResponse) + if err != nil { + return nil, err + } + + if 
strings.ToLower(tokenResponse.TokenType) != "bearer" { + return nil, errors.New("unknown token type returned from token endpoint") + } + + return &oauth2Token{ + Token: strings.TrimSpace(tokenResponse.AccessToken), + ExpiresAt: time.Now().Add(time.Duration(tokenResponse.ExpiresIn) * time.Second), + }, nil +} + +func (ap *oauth2ClientCredentialsAuthPlugin) Prepare(req *http.Request) error { + minTokenLifetime := float64(10) + if ap.tokenCache == nil || time.Until(ap.tokenCache.ExpiresAt).Seconds() < minTokenLifetime { + ap.logger.Debug("Requesting token from token_url %v", ap.TokenURL) + token, err := ap.requestToken(req.Context()) + if err != nil { + return err + } + ap.tokenCache = token + } + + req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", ap.tokenCache.Token)) + return nil +} + +// clientTLSAuthPlugin represents authentication via client certificate on a TLS connection +type clientTLSAuthPlugin struct { + Cert string `json:"cert"` + PrivateKey string `json:"private_key"` + PrivateKeyPassphrase string `json:"private_key_passphrase,omitempty"` + CACert string `json:"ca_cert,omitempty"` // Deprecated: Use `services[_].tls.ca_cert` instead + SystemCARequired bool `json:"system_ca_required,omitempty"` // Deprecated: Use `services[_].tls.system_ca_required` instead +} + +func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) { + tlsConfig, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + if ap.Cert == "" { + return nil, errors.New("client certificate is needed when client TLS is enabled") + } + if ap.PrivateKey == "" { + return nil, errors.New("private key is needed when client TLS is enabled") + } + + var keyPEMBlock []byte + data, err := os.ReadFile(ap.PrivateKey) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(data) + if block == nil { + return nil, errors.New("PEM data could not be found") + } + + // nolint: staticcheck // We don't want to forbid users from using this encryption. + if x509.IsEncryptedPEMBlock(block) { + if ap.PrivateKeyPassphrase == "" { + return nil, errors.New("client certificate passphrase is needed, because the certificate is password encrypted") + } + // nolint: staticcheck // We don't want to forbid users from using this encryption. + block, err := x509.DecryptPEMBlock(block, []byte(ap.PrivateKeyPassphrase)) + if err != nil { + return nil, err + } + key, err := x509.ParsePKCS8PrivateKey(block) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(block) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + rsa, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + keyPEMBlock = pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(rsa), + }, + ) + } else { + keyPEMBlock = data + } + + certPEMBlock, err := os.ReadFile(ap.Cert) + if err != nil { + return nil, err + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + + var client *http.Client + + if c.TLS != nil && c.TLS.CACert != "" { + client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) + } else { + if ap.CACert != "" { + c.logger.Warn("Deprecated 'services[_].credentials.client_tls.ca_cert' configuration specified. Use 'services[_].tls.ca_cert' instead. 
See https://www.openpolicyagent.org/docs/latest/configuration/#services") + caCert, err := os.ReadFile(ap.CACert) + if err != nil { + return nil, err + } + + var caCertPool *x509.CertPool + if ap.SystemCARequired { + caCertPool, err = x509.SystemCertPool() + if err != nil { + return nil, err + } + } else { + caCertPool = x509.NewCertPool() + } + + ok := caCertPool.AppendCertsFromPEM(caCert) + if !ok { + return nil, errors.New("unable to parse and append CA certificate to certificate pool") + } + tlsConfig.RootCAs = caCertPool + } + + client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) + } + + return client, nil +} + +func (ap *clientTLSAuthPlugin) Prepare(req *http.Request) error { + return nil +} + +// awsSigningAuthPlugin represents authentication using AWS V4 HMAC signing in the Authorization header +type awsSigningAuthPlugin struct { + AWSEnvironmentCredentials *awsEnvironmentCredentialService `json:"environment_credentials,omitempty"` + AWSMetadataCredentials *awsMetadataCredentialService `json:"metadata_credentials,omitempty"` + AWSWebIdentityCredentials *awsWebIdentityCredentialService `json:"web_identity_credentials,omitempty"` + AWSProfileCredentials *awsProfileCredentialService `json:"profile_credentials,omitempty"` + + AWSService string `json:"service,omitempty"` + AWSSignatureVersion string `json:"signature_version,omitempty"` + + ecrAuthPlugin *ecrAuthPlugin + + logger logging.Logger +} + +type awsCredentialServiceChain struct { + awsCredentialServices []awsCredentialService + logger logging.Logger +} + +func (acs *awsCredentialServiceChain) addService(service awsCredentialService) { + acs.awsCredentialServices = append(acs.awsCredentialServices, service) +} + +func (acs *awsCredentialServiceChain) credentials(ctx context.Context) (aws.Credentials, error) { + for _, service := range acs.awsCredentialServices { + credential, err := service.credentials(ctx) + if err != nil { + acs.logger.Debug("awsSigningAuthPlugin:%T failed: %v", service, err) + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return aws.Credentials{}, err + } + + continue + } + + acs.logger.Debug("awsSigningAuthPlugin:%T successful", service) + return credential, nil + } + + return aws.Credentials{}, errors.New("all AWS credential providers failed") +} + +func (ap *awsSigningAuthPlugin) awsCredentialService() awsCredentialService { + chain := awsCredentialServiceChain{ + logger: ap.logger, + } + + /* + Here we maintain the order of addition to the chain inline with + the order of credential providers followed by default by the + AWS SDK. 
For example + + https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html + */ + + if ap.AWSEnvironmentCredentials != nil { + ap.AWSEnvironmentCredentials.logger = ap.logger + chain.addService(ap.AWSEnvironmentCredentials) + } + + if ap.AWSWebIdentityCredentials != nil { + ap.AWSWebIdentityCredentials.logger = ap.logger + chain.addService(ap.AWSWebIdentityCredentials) + } + + if ap.AWSProfileCredentials != nil { + ap.AWSProfileCredentials.logger = ap.logger + chain.addService(ap.AWSProfileCredentials) + } + + if ap.AWSMetadataCredentials != nil { + ap.AWSMetadataCredentials.logger = ap.logger + chain.addService(ap.AWSMetadataCredentials) + } + + return &chain +} + +func (ap *awsSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + if ap.logger == nil { + ap.logger = c.logger + } + + if err := ap.validateAndSetDefaults(c.Type); err != nil { + return nil, err + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (ap *awsSigningAuthPlugin) Prepare(req *http.Request) error { + switch ap.AWSService { + case "ecr": + return ap.ecrAuthPlugin.Prepare(req) + default: + creds, err := ap.awsCredentialService().credentials(req.Context()) + if err != nil { + return fmt.Errorf("failed to get aws credentials: %w", err) + } + + ap.logger.Debug("Signing request with AWS credentials.") + + return aws.SignRequest(req, ap.AWSService, creds, time.Now(), ap.AWSSignatureVersion) + } +} + +func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error { + cfgs := map[bool]int{} + cfgs[ap.AWSEnvironmentCredentials != nil]++ + cfgs[ap.AWSMetadataCredentials != nil]++ + cfgs[ap.AWSWebIdentityCredentials != nil]++ + cfgs[ap.AWSProfileCredentials != nil]++ + + if cfgs[true] == 0 { + return errors.New("a AWS credential service must be specified when S3 signing is enabled") + } + + if ap.AWSMetadataCredentials != nil { + if ap.AWSMetadataCredentials.RegionName == "" { + return errors.New("at least aws_region must be specified for AWS metadata credential service") + } + } + + if ap.AWSWebIdentityCredentials != nil { + if err := ap.AWSWebIdentityCredentials.populateFromEnv(); err != nil { + return err + } + } + + ap.AWSService = strings.ToLower(ap.AWSService) + + // Only allow ECR for OCI service types + if serviceType == "oci" { + if ap.AWSService == "" { + ap.AWSService = "ecr" + } + + if ap.AWSService != "ecr" { + return fmt.Errorf(`cannot use aws service %q with service type "oci"`, ap.AWSService) + } + + // We need to setup a special auth plugin for ECR. + ap.ecrAuthPlugin = newECRAuthPlugin(ap) + } else { + // Disallow ECR for non-OCI service types + if ap.AWSService == "ecr" { + return errors.New(`aws service "ecr" must be used with service type "oci"`) + } + if ap.AWSService == "" { + ap.AWSService = awsSigv4SigningDefaultService + } + } + + if ap.AWSSignatureVersion == "" { + ap.AWSSignatureVersion = "4" + } + + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go new file mode 100644 index 00000000000..70025f6d8d5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go @@ -0,0 +1,519 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package rest + +import ( + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-ini/ini" + "github.com/open-policy-agent/opa/internal/providers/aws" + "github.com/open-policy-agent/opa/logging" +) + +const ( + // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + ec2DefaultCredServicePath = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" + + // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + ec2DefaultTokenPath = "http://169.254.169.254/latest/api/token" + + // ref. https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html + ecsDefaultCredServicePath = "http://169.254.170.2" + ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + + // ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html + stsDefaultDomain = "amazonaws.com" + stsDefaultPath = "https://sts.%s" + stsRegionPath = "https://sts.%s.%s" + + // ref. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html + accessKeyEnvVar = "AWS_ACCESS_KEY_ID" + secretKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + securityTokenEnvVar = "AWS_SECURITY_TOKEN" + sessionTokenEnvVar = "AWS_SESSION_TOKEN" + awsRegionEnvVar = "AWS_REGION" + awsDomainEnvVar = "AWS_DOMAIN" + awsRoleArnEnvVar = "AWS_ROLE_ARN" + awsWebIdentityTokenFileEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" + awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" + awsProfileEnvVar = "AWS_PROFILE" + + // ref. https://docs.aws.amazon.com/sdkref/latest/guide/settings-global.html + accessKeyGlobalSetting = "aws_access_key_id" + secretKeyGlobalSetting = "aws_secret_access_key" + securityTokenGlobalSetting = "aws_session_token" +) + +// awsCredentialService represents the interface for AWS credential providers +type awsCredentialService interface { + credentials(context.Context) (aws.Credentials, error) +} + +// awsEnvironmentCredentialService represents an static environment-variable credential provider for AWS +type awsEnvironmentCredentialService struct { + logger logging.Logger +} + +func (cs *awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) { + var creds aws.Credentials + creds.AccessKey = os.Getenv(accessKeyEnvVar) + if creds.AccessKey == "" { + return creds, errors.New("no " + accessKeyEnvVar + " set in environment") + } + creds.SecretKey = os.Getenv(secretKeyEnvVar) + if creds.SecretKey == "" { + return creds, errors.New("no " + secretKeyEnvVar + " set in environment") + } + creds.RegionName = os.Getenv(awsRegionEnvVar) + if creds.RegionName == "" { + return creds, errors.New("no " + awsRegionEnvVar + " set in environment") + } + // SessionToken is required if using temporary ENV credentials from assumed IAM role + // Missing SessionToken results with 403 s3 error. + creds.SessionToken = os.Getenv(sessionTokenEnvVar) + if creds.SessionToken == "" { + // In case of missing SessionToken try to get SecurityToken + // AWS switched to use SessionToken, but SecurityToken was left for backward compatibility + creds.SessionToken = os.Getenv(securityTokenEnvVar) + } + + return creds, nil +} + +// awsProfileCredentialService represents a credential provider for AWS that extracts credentials from the AWS +// credentials file +type awsProfileCredentialService struct { + + // Path to the credentials file. 
+ // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Path string `json:"path,omitempty"` + + // AWS Profile to extract credentials from the credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string `json:"profile,omitempty"` + + RegionName string `json:"aws_region"` + + logger logging.Logger +} + +func (cs *awsProfileCredentialService) credentials(context.Context) (aws.Credentials, error) { + var creds aws.Credentials + + filename, err := cs.path() + if err != nil { + return creds, err + } + + cfg, err := ini.Load(filename) + if err != nil { + return creds, fmt.Errorf("failed to read credentials file: %v", err) + } + + profile, err := cfg.GetSection(cs.profile()) + if err != nil { + return creds, fmt.Errorf("failed to get profile: %v", err) + } + + creds.AccessKey = profile.Key(accessKeyGlobalSetting).String() + if creds.AccessKey == "" { + return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, accessKeyGlobalSetting) + } + + creds.SecretKey = profile.Key(secretKeyGlobalSetting).String() + if creds.SecretKey == "" { + return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, secretKeyGlobalSetting) + } + + creds.SessionToken = profile.Key(securityTokenGlobalSetting).String() // default to empty string + + if cs.RegionName == "" { + if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { + return creds, errors.New("no " + awsRegionEnvVar + " set in environment or configuration") + } + } + creds.RegionName = cs.RegionName + + return creds, nil +} + +func (cs *awsProfileCredentialService) path() (string, error) { + if len(cs.Path) != 0 { + return cs.Path, nil + } + + if cs.Path = os.Getenv(awsCredentialsFileEnvVar); len(cs.Path) != 0 { + return cs.Path, nil + } + + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("user home directory not found: %w", err) + } + + cs.Path = filepath.Join(homeDir, ".aws", "credentials") + + return cs.Path, nil +} + +func (cs *awsProfileCredentialService) profile() string { + if cs.Profile != "" { + return cs.Profile + } + + cs.Profile = os.Getenv(awsProfileEnvVar) + + if cs.Profile == "" { + cs.Profile = "default" + } + + return cs.Profile +} + +// awsMetadataCredentialService represents an EC2 metadata service credential provider for AWS +type awsMetadataCredentialService struct { + RoleName string `json:"iam_role,omitempty"` + RegionName string `json:"aws_region"` + creds aws.Credentials + expiration time.Time + credServicePath string + tokenPath string + logger logging.Logger +} + +func (cs *awsMetadataCredentialService) urlForMetadataService() (string, error) { + // override default path for testing + if cs.credServicePath != "" { + return cs.credServicePath + cs.RoleName, nil + } + // otherwise, normal flow + // if a role name is provided, look up via the EC2 credential service + if cs.RoleName != "" { + return ec2DefaultCredServicePath + cs.RoleName, nil + } + // otherwise, check environment to see if it looks like we're in an ECS + // container (with implied role association) + if isECS() { + return ecsDefaultCredServicePath + os.Getenv(ecsRelativePathEnvVar), nil + } + // if there's no role name and we don't appear to 
have a path to the + // ECS container service, then the configuration is invalid + return "", errors.New("metadata endpoint cannot be determined from settings and environment") +} + +func (cs *awsMetadataCredentialService) tokenRequest(ctx context.Context) (*http.Request, error) { + tokenURL := ec2DefaultTokenPath + if cs.tokenPath != "" { + // override for testing + tokenURL = cs.tokenPath + } + req, err := http.NewRequestWithContext(ctx, http.MethodPut, tokenURL, nil) + if err != nil { + return nil, err + } + + // we are going to use the token in the immediate future, so a long TTL is not necessary + req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60") + return req, nil +} + +func (cs *awsMetadataCredentialService) refreshFromService(ctx context.Context) error { + // define the expected JSON payload from the EC2 credential service + // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + type metadataPayload struct { + Code string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Token string + Expiration time.Time + } + + // Short circuit if a reasonable amount of time until credential expiration remains + const tokenExpirationMargin = 5 * time.Minute + + if time.Now().Add(tokenExpirationMargin).Before(cs.expiration) { + cs.logger.Debug("Credentials previously obtained from metadata service still valid.") + return nil + } + + cs.logger.Debug("Obtaining credentials from metadata service.") + metaDataURL, err := cs.urlForMetadataService() + if err != nil { + // configuration issue or missing ECS environment + return err + } + + // construct an HTTP client with a reasonably short timeout + client := &http.Client{Timeout: time.Second * 10} + req, err := http.NewRequestWithContext(ctx, http.MethodGet, metaDataURL, nil) + if err != nil { + return errors.New("unable to construct metadata HTTP request: " + err.Error()) + } + + // if in the EC2 environment, we will use IMDSv2, which requires a session cookie from a + // PUT request on the token endpoint before it will give the credentials, this provides + // protection from SSRF attacks + if !isECS() { + tokenReq, err := cs.tokenRequest(ctx) + if err != nil { + return errors.New("unable to construct metadata token HTTP request: " + err.Error()) + } + body, err := aws.DoRequestWithClient(tokenReq, client, "metadata token", cs.logger) + if err != nil { + return err + } + // token is the body of response; add to header of metadata request + req.Header.Set("X-aws-ec2-metadata-token", string(body)) + } + + body, err := aws.DoRequestWithClient(req, client, "metadata", cs.logger) + if err != nil { + return err + } + + var payload metadataPayload + err = json.Unmarshal(body, &payload) + if err != nil { + return errors.New("failed to parse credential response from metadata service: " + err.Error()) + } + + // Only the EC2 endpoint returns the "Code" element which indicates whether the query was + // successful; the ECS endpoint does not! Some other fields are missing in the ECS payload + // but we do not depend on them. 
+ if cs.RoleName != "" && payload.Code != "Success" { + return errors.New("metadata service query did not succeed: " + payload.Code) + } + + cs.expiration = payload.Expiration + cs.creds.AccessKey = payload.AccessKeyID + cs.creds.SecretKey = payload.SecretAccessKey + cs.creds.SessionToken = payload.Token + cs.creds.RegionName = cs.RegionName + + return nil +} + +func (cs *awsMetadataCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { + err := cs.refreshFromService(ctx) + if err != nil { + return cs.creds, err + } + return cs.creds, nil +} + +// awsWebIdentityCredentialService represents an STS WebIdentity credential services +type awsWebIdentityCredentialService struct { + RoleArn string + WebIdentityTokenFile string + RegionName string `json:"aws_region"` + SessionName string `json:"session_name"` + Domain string `json:"aws_domain"` + stsURL string + creds aws.Credentials + expiration time.Time + logger logging.Logger +} + +func (cs *awsWebIdentityCredentialService) populateFromEnv() error { + cs.RoleArn = os.Getenv(awsRoleArnEnvVar) + if cs.RoleArn == "" { + return errors.New("no " + awsRoleArnEnvVar + " set in environment") + } + cs.WebIdentityTokenFile = os.Getenv(awsWebIdentityTokenFileEnvVar) + if cs.WebIdentityTokenFile == "" { + return errors.New("no " + awsWebIdentityTokenFileEnvVar + " set in environment") + } + + if cs.Domain == "" { + cs.Domain = os.Getenv(awsDomainEnvVar) + } + + if cs.RegionName == "" { + if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { + return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") + } + } + return nil +} + +func (cs *awsWebIdentityCredentialService) stsPath() string { + var domain string + if cs.Domain != "" { + domain = strings.ToLower(cs.Domain) + } else { + domain = stsDefaultDomain + } + + var stsPath string + switch { + case cs.stsURL != "": + stsPath = cs.stsURL + case cs.RegionName != "": + stsPath = fmt.Sprintf(stsRegionPath, strings.ToLower(cs.RegionName), domain) + default: + stsPath = fmt.Sprintf(stsDefaultPath, domain) + } + return stsPath +} + +func (cs *awsWebIdentityCredentialService) refreshFromService(ctx context.Context) error { + // define the expected JSON payload from the EC2 credential service + // ref. 
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + type responsePayload struct { + Result struct { + Credentials struct { + SessionToken string + SecretAccessKey string + Expiration time.Time + AccessKeyID string `xml:"AccessKeyId"` + } + } `xml:"AssumeRoleWithWebIdentityResult"` + } + + // short circuit if a reasonable amount of time until credential expiration remains + if time.Now().Add(time.Minute * 5).Before(cs.expiration) { + cs.logger.Debug("Credentials previously obtained from sts service still valid.") + return nil + } + + cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) + + var sessionName string + if cs.SessionName == "" { + sessionName = "open-policy-agent" + } else { + sessionName = cs.SessionName + } + + tokenData, err := os.ReadFile(cs.WebIdentityTokenFile) + if err != nil { + return errors.New("unable to read web token for sts HTTP request: " + err.Error()) + } + + token := string(tokenData) + + queryVals := url.Values{ + "Action": []string{"AssumeRoleWithWebIdentity"}, + "RoleSessionName": []string{sessionName}, + "RoleArn": []string{cs.RoleArn}, + "WebIdentityToken": []string{token}, + "Version": []string{"2011-06-15"}, + } + stsRequestURL, _ := url.Parse(cs.stsPath()) + + // construct an HTTP client with a reasonably short timeout + client := &http.Client{Timeout: time.Second * 10} + req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) + if err != nil { + return errors.New("unable to construct STS HTTP request: " + err.Error()) + } + + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) + if err != nil { + return err + } + + var payload responsePayload + err = xml.Unmarshal(body, &payload) + if err != nil { + return errors.New("failed to parse credential response from STS service: " + err.Error()) + } + + cs.expiration = payload.Result.Credentials.Expiration + cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID + cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey + cs.creds.SessionToken = payload.Result.Credentials.SessionToken + cs.creds.RegionName = cs.RegionName + + return nil +} + +func (cs *awsWebIdentityCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { + err := cs.refreshFromService(ctx) + if err != nil { + return cs.creds, err + } + return cs.creds, nil +} + +func isECS() bool { + // the special relative path URI is set by the container agent in the ECS environment only + _, isECS := os.LookupEnv(ecsRelativePathEnvVar) + return isECS +} + +// ecrAuthPlugin authorizes requests to AWS ECR. +type ecrAuthPlugin struct { + token aws.ECRAuthorizationToken + + // awsAuthPlugin is used to sign ecr authorization token requests. + awsAuthPlugin *awsSigningAuthPlugin + + // ecr represents the service we request tokens from. + ecr ecr + + logger logging.Logger +} + +type ecr interface { + GetAuthorizationToken(context.Context, aws.Credentials, string) (aws.ECRAuthorizationToken, error) +} + +func newECRAuthPlugin(ap *awsSigningAuthPlugin) *ecrAuthPlugin { + return &ecrAuthPlugin{ + awsAuthPlugin: ap, + ecr: aws.NewECR(ap.logger), + logger: ap.logger, + } +} + +// Prepare should be called with any request to AWS ECR. +// It takes care of retrieving an ECR authorization token to sign +// the request with. 
+func (ap *ecrAuthPlugin) Prepare(r *http.Request) error { + if !ap.token.IsValid() { + ap.logger.Debug("Refreshing ECR auth token") + if err := ap.refreshAuthorizationToken(r.Context()); err != nil { + return err + } + } + + ap.logger.Debug("Signing request with ECR authorization token") + + r.Header.Set("Authorization", fmt.Sprintf("Basic %s", ap.token.AuthorizationToken)) + return nil +} + +func (ap *ecrAuthPlugin) refreshAuthorizationToken(ctx context.Context) error { + creds, err := ap.awsAuthPlugin.awsCredentialService().credentials(ctx) + if err != nil { + return fmt.Errorf("failed to get aws credentials: %w", err) + } + + token, err := ap.ecr.GetAuthorizationToken(ctx, creds, ap.awsAuthPlugin.AWSSignatureVersion) + if err != nil { + return fmt.Errorf("ecr: failed to get authorization token: %w", err) + } + + ap.token = token + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go new file mode 100644 index 00000000000..6a85dea6816 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go @@ -0,0 +1,157 @@ +package rest + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +var ( + azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + defaultAPIVersion = "2018-02-01" + defaultResource = "https://storage.azure.com/" + timeout = 5 * time.Second +) + +// azureManagedIdentitiesToken holds a token for managed identities for Azure resources +type azureManagedIdentitiesToken struct { + AccessToken string `json:"access_token"` + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + Resource string `json:"resource"` + TokenType string `json:"token_type"` +} + +// azureManagedIdentitiesError represents an error fetching an azureManagedIdentitiesToken +type azureManagedIdentitiesError struct { + Err string `json:"error"` + Description string `json:"error_description"` + Endpoint string + StatusCode int +} + +func (e *azureManagedIdentitiesError) Error() string { + return fmt.Sprintf("%v %s retrieving azure token from %s: %s", e.StatusCode, e.Err, e.Endpoint, e.Description) +} + +// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization +type azureManagedIdentitiesAuthPlugin struct { + Endpoint string `json:"endpoint"` + APIVersion string `json:"api_version"` + Resource string `json:"resource"` + ObjectID string `json:"object_id"` + ClientID string `json:"client_id"` + MiResID string `json:"mi_res_id"` +} + +func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) { + if c.Type == "oci" { + return nil, errors.New("azure managed identities auth: OCI service not supported") + } + + if ap.Endpoint == "" { + ap.Endpoint = azureIMDSEndpoint + } + + if ap.Resource == "" { + ap.Resource = defaultResource + } + + if ap.APIVersion == "" { + ap.APIVersion = defaultAPIVersion + } + + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error { + token, err := azureManagedIdentitiesTokenRequest( + ap.Endpoint, ap.APIVersion, ap.Resource, + ap.ObjectID, ap.ClientID, ap.MiResID, + ) + if err != nil { + return err + } + + req.Header.Add("Authorization", "Bearer "+token.AccessToken) + return nil +} + +// 
azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken +func azureManagedIdentitiesTokenRequest( + endpoint, apiVersion, resource, objectID, clientID, miResID string, +) (azureManagedIdentitiesToken, error) { + var token azureManagedIdentitiesToken + e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID) + + request, err := http.NewRequest("GET", e, nil) + if err != nil { + return token, err + } + request.Header.Add("Metadata", "true") + + httpClient := http.Client{Timeout: timeout} + response, err := httpClient.Do(request) + if err != nil { + return token, err + } + defer response.Body.Close() + + data, err := io.ReadAll(response.Body) + if err != nil { + return token, err + } + + if s := response.StatusCode; s != http.StatusOK { + var azureError azureManagedIdentitiesError + err = json.Unmarshal(data, &azureError) + if err != nil { + return token, err + } + + azureError.Endpoint = e + azureError.StatusCode = s + return token, &azureError + } + + err = json.Unmarshal(data, &token) + if err != nil { + return token, err + } + + return token, nil +} + +// buildAzureManagedIdentitiesRequestPath constructs the request URL for an Azure managed identities token request +func buildAzureManagedIdentitiesRequestPath( + endpoint, apiVersion, resource, objectID, clientID, miResID string, +) string { + params := url.Values{ + "api-version": []string{apiVersion}, + "resource": []string{resource}, + } + + if objectID != "" { + params.Add("object_id", objectID) + } + + if clientID != "" { + params.Add("client_id", clientID) + } + + if miResID != "" { + params.Add("mi_res_id", miResID) + } + + return endpoint + "?" + params.Encode() +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go new file mode 100644 index 00000000000..c717105c7f3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go @@ -0,0 +1,173 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package rest + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +var ( + defaultGCPMetadataEndpoint = "http://metadata.google.internal" + defaultAccessTokenPath = "/computeMetadata/v1/instance/service-accounts/default/token" + defaultIdentityTokenPath = "/computeMetadata/v1/instance/service-accounts/default/identity" +) + +// AccessToken holds a GCP access token. +type AccessToken struct { + AccessToken string `json:"access_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` +} + +type gcpMetadataError struct { + err error + endpoint string + statusCode int +} + +func (e *gcpMetadataError) Error() string { + return fmt.Sprintf("error retrieving gcp ID token from %s %d: %v", e.endpoint, e.statusCode, e.err) +} + +func (e *gcpMetadataError) Unwrap() error { return e.err } + +var ( + errGCPMetadataNotFound = errors.New("not found") + errGCPMetadataInvalidRequest = errors.New("invalid request") + errGCPMetadataUnexpected = errors.New("unexpected error") +) + +// gcpMetadataAuthPlugin represents authentication via GCP metadata service. 
+type gcpMetadataAuthPlugin struct { + AccessTokenPath string `json:"access_token_path"` + Audience string `json:"audience"` + Endpoint string `json:"endpoint"` + IdentityTokenPath string `json:"identity_token_path"` + Scopes []string `json:"scopes"` +} + +func (ap *gcpMetadataAuthPlugin) NewClient(c Config) (*http.Client, error) { + if ap.Audience == "" && len(ap.Scopes) == 0 { + return nil, errors.New("audience or scopes is required when gcp metadata is enabled") + } + + if ap.Audience != "" && len(ap.Scopes) > 0 { + return nil, errors.New("either audience or scopes can be set, not both, when gcp metadata is enabled") + } + + if ap.Endpoint == "" { + ap.Endpoint = defaultGCPMetadataEndpoint + } + + if ap.AccessTokenPath == "" { + ap.AccessTokenPath = defaultAccessTokenPath + } + + if ap.IdentityTokenPath == "" { + ap.IdentityTokenPath = defaultIdentityTokenPath + } + + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (ap *gcpMetadataAuthPlugin) Prepare(req *http.Request) error { + var err error + var token string + + if ap.Audience != "" { + token, err = identityTokenFromMetadataService(ap.Endpoint, ap.IdentityTokenPath, ap.Audience) + if err != nil { + return fmt.Errorf("error retrieving identity token from gcp metadata service: %w", err) + } + } + + if len(ap.Scopes) != 0 { + token, err = accessTokenFromMetadataService(ap.Endpoint, ap.AccessTokenPath, ap.Scopes) + if err != nil { + return fmt.Errorf("error retrieving access token from gcp metadata service: %w", err) + } + } + + req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", token)) + return nil +} + +// accessTokenFromMetadataService returns an access token based on the scopes. +func accessTokenFromMetadataService(endpoint, path string, scopes []string) (string, error) { + s := strings.Join(scopes, ",") + + e := fmt.Sprintf("%s%s?scopes=%s", endpoint, path, s) + + data, err := gcpMetadataServiceRequest(e) + if err != nil { + return "", err + } + + var accessToken AccessToken + err = json.Unmarshal(data, &accessToken) + if err != nil { + return "", err + } + + return accessToken.AccessToken, nil +} + +// identityTokenFromMetadataService returns an identity token based on the audience. 
+func identityTokenFromMetadataService(endpoint, path, audience string) (string, error) { + e := fmt.Sprintf("%s%s?audience=%s", endpoint, path, audience) + + data, err := gcpMetadataServiceRequest(e) + if err != nil { + return "", err + } + return string(data), nil +} + +func gcpMetadataServiceRequest(endpoint string) ([]byte, error) { + request, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + request.Header.Add("Metadata-Flavor", "Google") + + timeout := time.Duration(5) * time.Second + httpClient := http.Client{Timeout: timeout} + + response, err := httpClient.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + + switch s := response.StatusCode; s { + case 200: + break + case 400: + return nil, &gcpMetadataError{errGCPMetadataInvalidRequest, endpoint, s} + case 404: + return nil, &gcpMetadataError{errGCPMetadataNotFound, endpoint, s} + default: + return nil, &gcpMetadataError{errGCPMetadataUnexpected, endpoint, s} + } + + data, err := io.ReadAll(response.Body) + if err != nil { + return nil, err + } + + return data, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go new file mode 100644 index 00000000000..62164580b4b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go @@ -0,0 +1,368 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package rest implements a REST client for communicating with remote services. +package rest + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httputil" + "reflect" + "strings" + + "github.com/open-policy-agent/opa/internal/version" + "github.com/open-policy-agent/opa/keys" + "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/util" +) + +const ( + defaultResponseHeaderTimeoutSeconds = int64(10) + defaultResponseSizeLimitBytes = 1024 + + grantTypeClientCredentials = "client_credentials" + grantTypeJwtBearer = "jwt_bearer" +) + +// An HTTPAuthPlugin represents a mechanism to construct and configure HTTP authentication for a REST service +type HTTPAuthPlugin interface { + // implementations can assume NewClient will be called before Prepare + NewClient(Config) (*http.Client, error) + Prepare(*http.Request) error +} + +// Config represents configuration for a REST client. 
+type Config struct { + Name string `json:"name"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + AllowInsecureTLS bool `json:"allow_insecure_tls,omitempty"` + ResponseHeaderTimeoutSeconds *int64 `json:"response_header_timeout_seconds,omitempty"` + TLS *serverTLSConfig `json:"tls,omitempty"` + Credentials struct { + Bearer *bearerAuthPlugin `json:"bearer,omitempty"` + OAuth2 *oauth2ClientCredentialsAuthPlugin `json:"oauth2,omitempty"` + ClientTLS *clientTLSAuthPlugin `json:"client_tls,omitempty"` + S3Signing *awsSigningAuthPlugin `json:"s3_signing,omitempty"` + GCPMetadata *gcpMetadataAuthPlugin `json:"gcp_metadata,omitempty"` + AzureManagedIdentity *azureManagedIdentitiesAuthPlugin `json:"azure_managed_identity,omitempty"` + Plugin *string `json:"plugin,omitempty"` + } `json:"credentials"` + Type string `json:"type,omitempty"` + keys map[string]*keys.Config + logger logging.Logger +} + +// Equal returns true if this client config is equal to the other. +func (c *Config) Equal(other *Config) bool { + otherWithoutLogger := *other + otherWithoutLogger.logger = c.logger + return reflect.DeepEqual(c, &otherWithoutLogger) +} + +// An AuthPluginLookupFunc can lookup auth plugins by their name. +type AuthPluginLookupFunc func(name string) HTTPAuthPlugin + +// AuthPlugin should be used to get an authentication method from the config. +func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error) { + var candidate HTTPAuthPlugin + if c.Credentials.Plugin != nil { + if lookup == nil { + // if no authPluginLookup function is passed we can't resolve the plugin + return nil, errors.New("missing auth plugin lookup function") + } + + candidate := lookup(*c.Credentials.Plugin) + if candidate == nil { + return nil, fmt.Errorf("auth plugin %q not found", *c.Credentials.Plugin) + } + + return candidate, nil + } + // reflection avoids need for this code to change as auth plugins are added + s := reflect.ValueOf(c.Credentials) + for i := 0; i < s.NumField(); i++ { + if s.Field(i).IsNil() { + continue + } + + if candidate != nil { + return nil, errors.New("a maximum one credential method must be specified") + } + + candidate = s.Field(i).Interface().(HTTPAuthPlugin) + } + + if candidate == nil { + return &defaultAuthPlugin{}, nil + } + return candidate, nil +} + +func (c *Config) authHTTPClient(lookup AuthPluginLookupFunc) (*http.Client, error) { + plugin, err := c.AuthPlugin(lookup) + if err != nil { + return nil, err + } + return plugin.NewClient(*c) +} + +func (c *Config) authPrepare(req *http.Request, lookup AuthPluginLookupFunc) error { + plugin, err := c.AuthPlugin(lookup) + if err != nil { + return err + } + return plugin.Prepare(req) +} + +// Client implements an HTTP/REST client for communicating with remote +// services. +type Client struct { + bytes *[]byte + json *interface{} + config Config + headers map[string]string + authPluginLookup AuthPluginLookupFunc + logger logging.Logger + loggerFields map[string]interface{} + distributedTacingOpts tracing.Options +} + +// Name returns an option that overrides the service name on the client. +func Name(s string) func(*Client) { + return func(c *Client) { + c.config.Name = s + } +} + +// AuthPluginLookup assigns a function to lookup an HTTPAuthPlugin to a new Client. +// It's intended to be used when creating a Client using New(). Usually this is passed +// the plugins.AuthPlugin func, which retrieves a registered HTTPAuthPlugin from the +// plugin manager. 
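+// Illustrative wiring (confBytes, keyConfigs and lookupFn are placeholders for
+// caller-supplied values, not part of the upstream source):
+//
+//	client, err := rest.New(confBytes, keyConfigs, rest.AuthPluginLookup(lookupFn))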
+func AuthPluginLookup(l AuthPluginLookupFunc) func(*Client) { + return func(c *Client) { + c.authPluginLookup = l + } +} + +// Logger assigns a logger to the client +func Logger(l logging.Logger) func(*Client) { + return func(c *Client) { + c.logger = l + } +} + +// DistributedTracingOpts sets the options to be used by distributed tracing. +func DistributedTracingOpts(tr tracing.Options) func(*Client) { + return func(c *Client) { + c.distributedTacingOpts = tr + } +} + +// New returns a new Client for config. +func New(config []byte, keys map[string]*keys.Config, opts ...func(*Client)) (Client, error) { + var parsedConfig Config + if err := util.Unmarshal(config, &parsedConfig); err != nil { + return Client{}, err + } + + parsedConfig.URL = strings.TrimRight(parsedConfig.URL, "/") + + if parsedConfig.ResponseHeaderTimeoutSeconds == nil { + timeout := defaultResponseHeaderTimeoutSeconds + parsedConfig.ResponseHeaderTimeoutSeconds = &timeout + } + + parsedConfig.keys = keys + + client := Client{ + config: parsedConfig, + } + + for _, f := range opts { + f(&client) + } + + if client.logger == nil { + client.logger = logging.Get() + } + + client.config.logger = client.logger + + return client, nil +} + +// AuthPluginLookup returns the lookup function to find a custom registered +// auth plugin by its name. +func (c Client) AuthPluginLookup() AuthPluginLookupFunc { + return c.authPluginLookup +} + +// Service returns the name of the service this Client is configured for. +func (c Client) Service() string { + return c.config.Name +} + +// Config returns this Client's configuration +func (c Client) Config() *Config { + return &c.config +} + +// SetResponseHeaderTimeout sets the "ResponseHeaderTimeout" in the http client's Transport +func (c Client) SetResponseHeaderTimeout(timeout *int64) Client { + c.config.ResponseHeaderTimeoutSeconds = timeout + return c +} + +// Logger returns the logger assigned to the Client +func (c Client) Logger() logging.Logger { + return c.logger +} + +// LoggerFields returns the fields used for log statements used by Client +func (c Client) LoggerFields() map[string]interface{} { + return c.loggerFields +} + +// WithHeader returns a shallow copy of the client with a header to include the +// requests. +func (c Client) WithHeader(k, v string) Client { + if v == "" { + return c + } + if c.headers == nil { + c.headers = map[string]string{} + } + c.headers[k] = v + return c +} + +// WithJSON returns a shallow copy of the client with the JSON value set as the +// message body to include the requests. This function sets the Content-Type +// header. +func (c Client) WithJSON(body interface{}) Client { + c = c.WithHeader("Content-Type", "application/json") + c.json = &body + return c +} + +// WithBytes returns a shallow copy of the client with the bytes set as the +// message body to include in the requests. +func (c Client) WithBytes(body []byte) Client { + c.bytes = &body + return c +} + +// Do executes a request using the client. 
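+// Illustrative usage (names are placeholders): send a JSON body to a path
+// relative to the configured service URL and close the response when done.
+//
+//	resp, err := client.WithJSON(map[string]string{"hello": "world"}).Do(ctx, "POST", "v1/data")
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}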
+func (c Client) Do(ctx context.Context, method, path string) (*http.Response, error) { + + httpClient, err := c.config.authHTTPClient(c.authPluginLookup) + if err != nil { + return nil, err + } + + if len(c.distributedTacingOpts) > 0 { + httpClient.Transport = tracing.NewTransport(httpClient.Transport, c.distributedTacingOpts) + } + + path = strings.Trim(path, "/") + + var body io.Reader + + if c.bytes != nil { + body = bytes.NewReader(*c.bytes) + } else if c.json != nil { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(*c.json); err != nil { + return nil, err + } + body = &buf + } + + url := c.config.URL + "/" + path + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + headers := map[string]string{ + "User-Agent": version.UserAgent, + } + + // Copy custom headers from config. + for key, value := range c.config.Headers { + headers[key] = value + } + + // Overwrite with headers set directly on client. + for key, value := range c.headers { + headers[key] = value + } + + for key, value := range headers { + req.Header.Add(key, value) + } + + req = req.WithContext(ctx) + + err = c.config.authPrepare(req, c.authPluginLookup) + if err != nil { + return nil, err + } + + if c.logger.GetLevel() >= logging.Debug { + c.loggerFields = map[string]interface{}{ + "method": method, + "url": url, + "headers": withMaskedAuthorizationHeader(req.Header), + } + + c.logger.WithFields(c.loggerFields).Debug("Sending request.") + } + + resp, err := httpClient.Do(req) + + if resp != nil && c.logger.GetLevel() >= logging.Debug { + // Only log for debug purposes. If an error occurred, the caller should handle + // that. In the non-error case, the caller may not do anything. + c.loggerFields["status"] = resp.Status + c.loggerFields["headers"] = resp.Header + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, err + } + + if len(string(dump)) < defaultResponseSizeLimitBytes { + c.loggerFields["response"] = string(dump) + } else { + c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes])) + } + } + c.logger.WithFields(c.loggerFields).Debug("Received response.") + } + + return resp, err +} + +func withMaskedAuthorizationHeader(headers http.Header) http.Header { + authzHeader := headers.Get("Authorization") + if authzHeader != "" { + masked := make(http.Header) + for k, v := range headers { + masked[k] = v + } + masked.Set("Authorization", "REDACTED") + return masked + } + return headers +} diff --git a/vendor/github.com/open-policy-agent/opa/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/rego/plugins.go new file mode 100644 index 00000000000..abaa910341e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/rego/plugins.go @@ -0,0 +1,43 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package rego + +import ( + "context" + "sync" + + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/ir" +) + +var targetPlugins = map[string]TargetPlugin{} +var pluginMtx sync.Mutex + +type TargetPlugin interface { + IsTarget(string) bool + PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) +} + +type TargetPluginEval interface { + Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) +} + +func (r *Rego) targetPlugin(tgt string) TargetPlugin { + for _, p := range targetPlugins { + if p.IsTarget(tgt) { + return p + } + } + return nil +} + +func RegisterPlugin(name string, p TargetPlugin) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + if _, ok := targetPlugins[name]; ok { + panic("plugin already registered " + name) + } + targetPlugins[name] = p +} diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go index d2e8c16cec4..73b34c28edd 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/rego.go +++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go @@ -25,6 +25,7 @@ import ( "github.com/open-policy-agent/opa/ir" "github.com/open-policy-agent/opa/loader" "github.com/open-policy-agent/opa/metrics" + "github.com/open-policy-agent/opa/plugins" "github.com/open-policy-agent/opa/resolver" "github.com/open-policy-agent/opa/storage" "github.com/open-policy-agent/opa/storage/inmem" @@ -122,6 +123,55 @@ type EvalContext struct { copyMaps bool printHook print.Hook capabilities *ast.Capabilities + strictBuiltinErrors bool +} + +func (e *EvalContext) RawInput() *interface{} { + return e.rawInput +} + +func (e *EvalContext) ParsedInput() ast.Value { + return e.parsedInput +} + +func (e *EvalContext) Time() time.Time { + return e.time +} + +func (e *EvalContext) Seed() io.Reader { + return e.seed +} + +func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { + return e.interQueryBuiltinCache +} + +func (e *EvalContext) PrintHook() print.Hook { + return e.printHook +} + +func (e *EvalContext) Metrics() metrics.Metrics { + return e.metrics +} + +func (e *EvalContext) StrictBuiltinErrors() bool { + return e.strictBuiltinErrors +} + +func (e *EvalContext) NDBCache() builtins.NDBCache { + return e.ndBuiltinCache +} + +func (e *EvalContext) CompiledQuery() ast.Body { + return e.compiledQuery.query +} + +func (e *EvalContext) Capabilities() *ast.Capabilities { + return e.capabilities +} + +func (e *EvalContext) Transaction() storage.Transaction { + return e.txn } // EvalOption defines a function to set an option on an EvalConfig @@ -314,23 +364,24 @@ func (pq preparedQuery) Modules() map[string]*ast.Module { // been opened. 
func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { ectx := &EvalContext{ - hasInput: false, - rawInput: nil, - parsedInput: nil, - metrics: nil, - txn: nil, - instrument: false, - instrumentation: nil, - partialNamespace: pq.r.partialNamespace, - queryTracers: nil, - unknowns: pq.r.unknowns, - parsedUnknowns: pq.r.parsedUnknowns, - compiledQuery: compiledQuery{}, - indexing: true, - earlyExit: true, - resolvers: pq.r.resolvers, - printHook: pq.r.printHook, - capabilities: pq.r.capabilities, + hasInput: false, + rawInput: nil, + parsedInput: nil, + metrics: nil, + txn: nil, + instrument: false, + instrumentation: nil, + partialNamespace: pq.r.partialNamespace, + queryTracers: nil, + unknowns: pq.r.unknowns, + parsedUnknowns: pq.r.parsedUnknowns, + compiledQuery: compiledQuery{}, + indexing: true, + earlyExit: true, + resolvers: pq.r.resolvers, + printHook: pq.r.printHook, + capabilities: pq.r.capabilities, + strictBuiltinErrors: pq.r.strictBuiltinErrors, } for _, o := range options { @@ -377,7 +428,9 @@ func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption // Note that it could still be nil ectx.rawInput = pq.r.rawInput } - if pq.r.target != targetWasm { + + if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target + pq.r.target != targetWasm { ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) if err != nil { return nil, finishFunc, err @@ -471,10 +524,10 @@ type queryType int // Define a query type for each of the top level Rego // API's that compile queries differently. const ( - evalQueryType queryType = iota - partialResultQueryType queryType = iota - partialQueryType queryType = iota - compileQueryType queryType = iota + evalQueryType queryType = iota + partialResultQueryType + partialQueryType + compileQueryType ) type loadPaths struct { @@ -538,6 +591,9 @@ type Rego struct { enablePrintStatements bool distributedTacingOpts tracing.Options strict bool + pluginMgr *plugins.Manager + plugins []TargetPlugin + targetPrepState TargetPluginEval } // Function represents a built-in function that is callable in Rego. @@ -1188,6 +1244,14 @@ func New(options ...func(r *Rego)) *Rego { r.generateJSON = generateJSON } + if r.pluginMgr != nil { + for _, name := range r.pluginMgr.Plugins() { + p := r.pluginMgr.Plugin(name) + if p0, ok := p.(TargetPlugin); ok { + r.plugins = append(r.plugins, p0) + } + } + } return r } @@ -1401,64 +1465,32 @@ func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResu queries = []ast.Body{r.compiledQueries[compileQueryType].query} } - return r.compileWasm(modules, queries, compileQueryType) -} - -func (r *Rego) compileWasm(modules []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { - decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) - - for k, v := range ast.BuiltinMap { - decls[k] = v - } - - for k, v := range r.builtinDecls { - decls[k] = v + if tgt := r.targetPlugin(r.target); tgt != nil { + return nil, fmt.Errorf("unsupported for rego target plugins") } - const queryName = "eval" // NOTE(tsandall): the query name is arbitrary + return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here +} - p := planner.New(). - WithQueries([]planner.QuerySet{ - { - Name: queryName, - Queries: queries, - RewrittenVars: r.compiledQueries[qType].compiler.RewrittenVars(), - }, - }). - WithModules(modules). 
- WithBuiltinDecls(decls). - WithDebug(r.dump) - policy, err := p.Plan() +func (r *Rego) compileWasm(modules []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { + policy, err := r.planQuery(queries, qType) if err != nil { return nil, err } - if r.dump != nil { - fmt.Fprintln(r.dump, "PLAN:") - fmt.Fprintln(r.dump, "-----") - err = ir.Pretty(r.dump, policy) - if err != nil { - return nil, err - } - fmt.Fprintln(r.dump) - } - m, err := wasm.New().WithPolicy(policy).Compile() if err != nil { return nil, err } var out bytes.Buffer - if err := encoding.WriteModule(&out, m); err != nil { return nil, err } - result := &CompileResult{ + return &CompileResult{ Bytes: out.Bytes(), - } - - return result, nil + }, nil } // PrepareOption defines a function to set an option to control @@ -1541,7 +1573,8 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa return PreparedEvalQuery{}, err } - if r.target == targetWasm { + switch r.target { + case targetWasm: // TODO(sr): make wasm a target plugin, too if r.hasWasmModule() { _ = txnClose(ctx, err) // Ignore error @@ -1580,6 +1613,20 @@ func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (Prepa return PreparedEvalQuery{}, err } r.opa = o + + case targetRego: // do nothing, don't lookup default plugin + default: // either a specific plugin target, or one that is default + if tgt := r.targetPlugin(r.target); tgt != nil { + queries := []ast.Body{r.compiledQueries[evalQueryType].query} + pol, err := r.planQuery(queries, evalQueryType) + if err != nil { + return PreparedEvalQuery{}, err + } + r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...) + if err != nil { + return PreparedEvalQuery{}, err + } + } } txnErr := txnClose(ctx, err) // Always call closer @@ -1731,7 +1778,14 @@ func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metr for _, module := range r.modules { p, err := module.Parse() if err != nil { - errs = append(errs, err) + switch errorWithType := err.(type) { + case ast.Errors: + for _, e := range errorWithType { + errs = append(errs, e) + } + default: + errs = append(errs, errorWithType) + } } r.parsedModules[module.filename] = p } @@ -1952,8 +2006,20 @@ func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, m metrics.Met } func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - if r.opa != nil { + switch { + case r.targetPrepState != nil: // target plugin flow + var val ast.Value + if r.runtime != nil { + val = r.runtime.Value + } + s, err := r.targetPrepState.Eval(ctx, ectx, val) + if err != nil { + return nil, err + } + return r.valueToQueryResult(s, ectx) + case r.target == targetWasm: return r.evalWasm(ctx, ectx) + case r.target == targetRego: // continue } q := topdown.NewQuery(ectx.compiledQuery.query). 
@@ -2050,7 +2116,11 @@ func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, erro return nil, err } - resultSet, ok := parsed.Value.(ast.Set) + return r.valueToQueryResult(parsed.Value, ectx) +} + +func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { + resultSet, ok := res.(ast.Set) if !ok { return nil, fmt.Errorf("illegal result type") } @@ -2060,7 +2130,7 @@ func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, erro } var rs ResultSet - err = resultSet.Iter(func(term *ast.Term) error { + err := resultSet.Iter(func(term *ast.Term) error { obj, ok := term.Value.(ast.Object) if !ok { return fmt.Errorf("illegal result type") @@ -2137,16 +2207,17 @@ func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialR } ectx := &EvalContext{ - parsedInput: r.parsedInput, - metrics: r.metrics, - txn: r.txn, - partialNamespace: r.partialNamespace, - queryTracers: r.queryTracers, - compiledQuery: r.compiledQueries[partialResultQueryType], - instrumentation: r.instrumentation, - indexing: true, - resolvers: r.resolvers, - capabilities: r.capabilities, + parsedInput: r.parsedInput, + metrics: r.metrics, + txn: r.txn, + partialNamespace: r.partialNamespace, + queryTracers: r.queryTracers, + compiledQuery: r.compiledQueries[partialResultQueryType], + instrumentation: r.instrumentation, + indexing: true, + resolvers: r.resolvers, + capabilities: r.capabilities, + strictBuiltinErrors: r.strictBuiltinErrors, } disableInlining := r.disableInlining @@ -2249,7 +2320,7 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, WithSkipPartialNamespace(r.skipPartialNamespace). WithShallowInlining(r.shallowInlining). WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithStrictBuiltinErrors(r.strictBuiltinErrors). + WithStrictBuiltinErrors(ectx.strictBuiltinErrors). WithSeed(ectx.seed). 
WithPrintHook(ectx.printHook) @@ -2383,11 +2454,13 @@ func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query as func (r *Rego) generateTermVar() *ast.Term { r.termVarID++ - - if r.target == targetWasm { - return ast.VarTerm(wasmVarPrefix + fmt.Sprintf("term%v", r.termVarID)) + prefix := ast.WildcardPrefix + if p := r.targetPlugin(r.target); p != nil { + prefix = wasmVarPrefix + } else if r.target == targetWasm { + prefix = wasmVarPrefix } - return ast.VarTerm(ast.WildcardPrefix + fmt.Sprintf("term%v", r.termVarID)) + return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) } func (r Rego) hasQuery() bool { @@ -2570,17 +2643,19 @@ func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, if err != nil { var e *HaltError if errors.As(err, &e) { - return topdown.Halt{Err: &topdown.Error{ + tdErr := &topdown.Error{ Code: topdown.BuiltinErr, Message: fmt.Sprintf("%v: %v", name, e.Error()), Location: bctx.Location, - }} + } + return topdown.Halt{Err: tdErr.Wrap(e)} } - return &topdown.Error{ + tdErr := &topdown.Error{ Code: topdown.BuiltinErr, Message: fmt.Sprintf("%v: %v", name, err.Error()), Location: bctx.Location, } + return tdErr.Wrap(err) } if result == nil { return nil @@ -2610,3 +2685,49 @@ func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { CopyMaps: ectx.copyMaps, }) } + +func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) + + for k, v := range ast.BuiltinMap { + decls[k] = v + } + + for k, v := range r.builtinDecls { + decls[k] = v + } + + const queryName = "eval" // NOTE(tsandall): the query name is arbitrary + + p := planner.New(). + WithQueries([]planner.QuerySet{ + { + Name: queryName, + Queries: queries, + RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), + }, + }). + WithModules(modules). + WithBuiltinDecls(decls). 
+ WithDebug(r.dump) + + policy, err := p.Plan() + if err != nil { + return nil, err + } + if r.dump != nil { + fmt.Fprintln(r.dump, "PLAN:") + fmt.Fprintln(r.dump, "-----") + err = ir.Pretty(r.dump, policy) + if err != nil { + return nil, err + } + fmt.Fprintln(r.dump) + } + return policy, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go index 5a7f2bc9107..30c488050fa 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go @@ -182,17 +182,19 @@ func handleBuiltinErr(name string, loc *ast.Location, err error) error { case *Error, Halt: return err case builtins.ErrOperand: - return &Error{ + e := &Error{ Code: TypeErr, Message: fmt.Sprintf("%v: %v", name, err.Error()), Location: loc, } + return e.Wrap(err) default: - return &Error{ + e := &Error{ Code: BuiltinErr, Message: fmt.Sprintf("%v: %v", name, err.Error()), Location: loc, } + return e.Wrap(err) } } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go index d7c3afbd893..353f9568405 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go @@ -249,7 +249,11 @@ func NumberToFloat(n ast.Number) *big.Float { // FloatToNumber converts f to a number. func FloatToNumber(f *big.Float) ast.Number { - return ast.Number(f.Text('g', -1)) + var format byte = 'g' + if f.IsInt() { + format = 'f' + } + return ast.Number(f.Text(format, -1)) } // NumberToInt converts n to a big int. diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go index d3f7a1a0d0d..f9d2bcff752 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go @@ -7,11 +7,10 @@ package cache import ( "container/list" + "sync" "github.com/open-policy-agent/opa/ast" - "sync" - "github.com/open-policy-agent/opa/util" ) @@ -62,6 +61,7 @@ func (c *Config) validateAndInjectDefaults() error { // InterQueryCacheValue defines the interface for the data that the inter-query cache holds. type InterQueryCacheValue interface { SizeInBytes() int64 + Clone() (InterQueryCacheValue, error) } // InterQueryCache defines the interface for the inter-query cache. @@ -70,6 +70,7 @@ type InterQueryCache interface { Insert(key ast.Value, value InterQueryCacheValue) int Delete(key ast.Value) UpdateConfig(config *Config) + Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) } // NewInterQueryCache returns a new inter-query cache. 
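The cache.go hunks above add a Clone method to both InterQueryCacheValue and InterQueryCache, so any value stored in the inter-query cache must now know how to deep-copy itself. A minimal sketch of a conforming value (hypothetical type; the real implementations used by http.send appear in topdown/http.go further down in this patch):

	type myValue struct{ data []byte }

	func (v *myValue) SizeInBytes() int64 { return int64(len(v.data)) }

	func (v *myValue) Clone() (cache.InterQueryCacheValue, error) {
		dup := make([]byte, len(v.data))
		copy(dup, v.data)
		return &myValue{data: dup}, nil
	}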
@@ -130,6 +131,12 @@ func (c *cache) UpdateConfig(config *Config) { c.config = config } +func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + c.mtx.Lock() + defer c.mtx.Unlock() + return c.unsafeClone(value) +} + func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue) (dropped int) { size := v.SizeInBytes() limit := c.maxSizeBytes() @@ -174,6 +181,10 @@ func (c *cache) unsafeDelete(k ast.Value) { c.l.Remove(cacheItem.keyElement) } +func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + return value.Clone() +} + func (c *cache) maxSizeBytes() int64 { if c.config == nil { return defaultMaxSizeBytes diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go index 3181ab89b7c..5c4cf1b9736 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go @@ -11,6 +11,7 @@ import ( "crypto/sha1" "crypto/sha256" "crypto/sha512" + "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" @@ -96,6 +97,29 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a return iter(valid) } +func builtinCryptoX509ParseKeyPair(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + certificate, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + key, err := builtins.StringOperand(operands[1].Value, 1) + if err != nil { + return err + } + + certs, err := getTLSx509KeyPairFromString([]byte(certificate), []byte(key)) + if err != nil { + return err + } + v, err := ast.InterfaceToValue(certs) + if err != nil { + return err + } + + return iter(ast.NewTerm(v)) +} + func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { input, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { @@ -249,6 +273,24 @@ func builtinCryptoHmacSha512(_ BuiltinContext, operands []*ast.Term, iter func(* return hmacHelper(operands, iter, sha512.New) } +func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + a1 := operands[0].Value + mac1, err := builtins.StringOperand(a1, 1) + if err != nil { + return err + } + + a2 := operands[1].Value + mac2, err := builtins.StringOperand(a2, 2) + if err != nil { + return err + } + + res := hmac.Equal([]byte(mac1), []byte(mac2)) + + return iter(ast.BooleanTerm(res)) +} + func init() { RegisterBuiltinFunc(ast.CryptoX509ParseCertificates.Name, builtinCryptoX509ParseCertificates) RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificates.Name, builtinCryptoX509ParseAndVerifyCertificates) @@ -257,10 +299,12 @@ func init() { RegisterBuiltinFunc(ast.CryptoSha256.Name, builtinCryptoSha256) RegisterBuiltinFunc(ast.CryptoX509ParseCertificateRequest.Name, builtinCryptoX509ParseCertificateRequest) RegisterBuiltinFunc(ast.CryptoX509ParseRSAPrivateKey.Name, builtinCryptoX509ParseRSAPrivateKey) + RegisterBuiltinFunc(ast.CryptoX509ParseKeyPair.Name, builtinCryptoX509ParseKeyPair) RegisterBuiltinFunc(ast.CryptoHmacMd5.Name, builtinCryptoHmacMd5) RegisterBuiltinFunc(ast.CryptoHmacSha1.Name, builtinCryptoHmacSha1) RegisterBuiltinFunc(ast.CryptoHmacSha256.Name, builtinCryptoHmacSha256) RegisterBuiltinFunc(ast.CryptoHmacSha512.Name, builtinCryptoHmacSha512) + RegisterBuiltinFunc(ast.CryptoHmacEqual.Name, builtinCryptoHmacEqual) } func verifyX509CertificateChain(certs []*x509.Certificate) 
([]*x509.Certificate, error) { @@ -406,7 +450,7 @@ func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool, return pool, nil } -// addCACertsFromBytes adds CA certificates from the environment variable named +// addCACertsFromEnv adds CA certificates from the environment variable named // by envName into the given pool. If pool is nil, it creates a new x509.CertPool. // pool is returned. func addCACertsFromEnv(pool *x509.CertPool, envName string) (*x509.CertPool, error) { @@ -428,6 +472,60 @@ func readCertFromFile(localCertFile string) ([]byte, error) { return certPEM, nil } +func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) { + + if !strings.HasPrefix(string(certPemBlock), "-----BEGIN") { + s, err := base64.StdEncoding.DecodeString(string(certPemBlock)) + if err != nil { + return nil, err + } + certPemBlock = s + } + + if !strings.HasPrefix(string(keyPemBlock), "-----BEGIN") { + s, err := base64.StdEncoding.DecodeString(string(keyPemBlock)) + if err != nil { + return nil, err + } + keyPemBlock = s + } + + // we assume it a DER certificate and try to convert it to a PEM. + if !bytes.HasPrefix(certPemBlock, []byte("-----BEGIN")) { + + pemBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certPemBlock, + } + + var buf bytes.Buffer + if err := pem.Encode(&buf, pemBlock); err != nil { + return nil, err + } + certPemBlock = buf.Bytes() + + } + // we assume it a DER key and try to convert it to a PEM. + if !bytes.HasPrefix(keyPemBlock, []byte("-----BEGIN")) { + pemBlock := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: keyPemBlock, + } + var buf bytes.Buffer + if err := pem.Encode(&buf, pemBlock); err != nil { + return nil, err + } + keyPemBlock = buf.Bytes() + } + + cert, err := tls.X509KeyPair(certPemBlock, keyPemBlock) + if err != nil { + return nil, err + } + + return &cert, nil +} + // ReadKeyFromFile reads a key from file func readKeyFromFile(localKeyFile string) ([]byte, error) { // Read in the cert file diff --git a/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/topdown/errors.go index f2459d2ad71..918df6c853a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/errors.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/errors.go @@ -29,6 +29,7 @@ type Error struct { Code string `json:"code"` Message string `json:"message"` Location *ast.Location `json:"location,omitempty"` + err error `json:"-"` } const ( @@ -90,6 +91,15 @@ func (e *Error) Error() string { return msg } +func (e *Error) Wrap(err error) *Error { + e.err = err + return e +} + +func (e *Error) Unwrap() error { + return e.err +} + func functionConflictErr(loc *ast.Location) error { return &Error{ Code: ConflictErr, diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/topdown/eval.go index 1072c95c06f..545dcff8fe7 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/eval.go @@ -2503,6 +2503,10 @@ func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Ru } func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error { + headKey := rule.Head.Key + if headKey == nil { + headKey = rule.Head.Reference[len(rule.Head.Reference)-1] + } key := e.ref[e.pos+1] child := e.e.child(rule.Body) @@ -2512,7 +2516,7 @@ func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.R err := child.eval(func(child 
*eval) error { defined = true - return e.e.biunify(rule.Head.Key, key, child.bindings, e.bindings, func() error { + return e.e.biunify(headKey, key, child.bindings, e.bindings, func() error { return e.evalOneRuleContinue(iter, rule, child) }) }) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/topdown/http.go index 23dd0a20da4..0b5c16870a7 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/http.go @@ -67,6 +67,7 @@ var allowedKeyNames = [...]string{ "force_cache_duration_seconds", "raise_error", "caching_mode", + "max_retry_attempts", } // ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 @@ -104,6 +105,12 @@ const ( // HTTPSendNetworkErr represents a network error. HTTPSendNetworkErr string = "eval_http_send_network_error" + + // minRetryDelay is amount of time to backoff after the first failure. + minRetryDelay = time.Millisecond * 100 + + // maxRetryDelay is the upper bound of backoff delay. + maxRetryDelay = time.Second * 60 ) func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -461,7 +468,7 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt case "cache", "caching_mode", "force_cache", "force_cache_duration_seconds", "force_json_decode", "force_yaml_decode", - "raise_error": // no-op + "raise_error", "max_retry_attempts": // no-op default: return nil, nil, fmt.Errorf("invalid parameter %q", key) } @@ -647,8 +654,39 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt return req, client, nil } -func executeHTTPRequest(req *http.Request, client *http.Client) (*http.Response, error) { - return client.Do(req) +func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) { + var err error + var retry int + + retry, err = getNumberValFromReqObj(inputReqObj, ast.StringTerm("max_retry_attempts")) + if err != nil { + return nil, err + } + + for i := 0; true; i++ { + + var resp *http.Response + resp, err = client.Do(req) + if err == nil { + return resp, nil + } + + // final attempt + if i == retry { + break + } + + if err == context.Canceled { + return nil, err + } + + select { + case <-time.After(util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)): + case <-req.Context().Done(): + return nil, context.Canceled + } + } + return nil, err } func isContentType(header http.Header, typ ...string) bool { @@ -765,10 +803,16 @@ func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { requestCache := c.bctx.InterQueryBuiltinCache - value, found := requestCache.Get(c.key) + cachedValue, found := requestCache.Get(c.key) if !found { return nil, nil } + + value, cerr := requestCache.Clone(cachedValue) + if cerr != nil { + return nil, handleHTTPSendErr(c.bctx, cerr) + } + c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr() var cachedRespData *interQueryCacheData @@ -803,7 +847,7 @@ func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { // check with the server if the stale response is still up-to-date. // If server returns a new response (ie. status_code=200), update the cache with the new response // If server returns an unmodified response (ie. 
status_code=304), update the headers for the existing response - result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, headers) + result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers) requestCache.Delete(c.key) if err != nil || result == nil { return nil, err @@ -944,6 +988,23 @@ func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) { return bool(b), nil } +func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { + term := req.Get(key) + if term == nil { + return 0, nil + } + + if t, ok := term.Value.(ast.Number); ok { + num, ok := t.Int() + if !ok || num < 0 { + return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String()) + } + return num, nil + } + + return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String()) +} + func getCachingMode(req ast.Object) (cachingMode, error) { key := ast.StringTerm("caching_mode") var s ast.String @@ -980,6 +1041,12 @@ func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody return &interQueryCacheValue{Data: b}, nil } +func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(cb.Data)) + copy(dup, cb.Data) + return &interQueryCacheValue{Data: dup}, nil +} + func (cb interQueryCacheValue) SizeInBytes() int64 { return int64(len(cb.Data)) } @@ -1063,6 +1130,18 @@ func (c *interQueryCacheData) SizeInBytes() int64 { return 0 } +func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(c.RespBody)) + copy(dup, c.RespBody) + + return &interQueryCacheData{ + ExpiresAt: c.ExpiresAt, + RespBody: dup, + Status: c.Status, + StatusCode: c.StatusCode, + Headers: c.Headers.Clone()}, nil +} + type responseHeaders struct { date time.Time // origination date and time of response cacheControl map[string]string // response cache-control header @@ -1100,7 +1179,7 @@ func parseResponseHeaders(headers http.Header) (*responseHeaders, error) { return &result, nil } -func revalidateCachedResponse(req *http.Request, client *http.Client, headers *responseHeaders) (*http.Response, bool, error) { +func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) { etag := headers.etag lastModified := headers.lastModified @@ -1118,7 +1197,7 @@ func revalidateCachedResponse(req *http.Request, client *http.Client, headers *r cloneReq.Header.Set("if-modified-since", lastModified) } - response, err := client.Do(cloneReq) + response, err := executeHTTPRequest(cloneReq, client, inputReqObj) if err != nil { return nil, false, err } @@ -1391,7 +1470,7 @@ func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { return nil, handleHTTPSendErr(c.bctx, err) } - return executeHTTPRequest(c.httpReq, c.httpClient) + return executeHTTPRequest(c.httpReq, c.httpClient, c.key) } type intraQueryCache struct { @@ -1441,7 +1520,7 @@ func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - return executeHTTPRequest(httpReq, httpClient) + return executeHTTPRequest(httpReq, httpClient, c.key) } func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/object.go b/vendor/github.com/open-policy-agent/opa/topdown/object.go index e3baf1fecce..ba5d77ff371 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/object.go 
+++ b/vendor/github.com/open-policy-agent/opa/topdown/object.go @@ -200,7 +200,16 @@ func mergewithOverwriteInPlace(obj, other ast.Object, frozenKeys map[*ast.Term]s v2 := obj.Get(k) // The key didn't exist in other, keep the original value. if v2 == nil { - obj.Insert(k, v) + nestedObj, ok := v.Value.(ast.Object) + if !ok { + // v is not an object + obj.Insert(k, v) + } else { + // Copy the nested object so the original object would not be modified + nestedObjCopy := nestedObj.Copy() + obj.Insert(k, ast.NewTerm(nestedObjCopy)) + } + return } // The key exists in both. Merge or reject change. diff --git a/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/topdown/providers.go index 72ad1922ca1..0b9e012268f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/providers.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/providers.go @@ -53,7 +53,7 @@ func getReqBodyBytes(body, rawBody *ast.Term) ([]byte, error) { } func objectToMap(o ast.Object) map[string][]string { - var out map[string][]string + out := make(map[string][]string, o.Len()) o.Foreach(func(k, v *ast.Term) { ks := stringFromTerm(k) vs := stringFromTerm(v) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/topdown/query.go index 6d5bb4b1974..2b540c58a51 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/query.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/query.go @@ -471,6 +471,14 @@ func (q *Query) Run(ctx context.Context) (QueryResultSet, error) { // Iter executes the query and invokes the iter function with query results // produced by evaluating the query. func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { + // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state + if q.compiler != nil && len(q.compiler.Errors) > 0 { + return &Error{ + Code: InternalErr, + Message: "compiler has errors", + } + } + if q.seed == nil { q.seed = rand.Reader } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/topdown/regex.go index b1902a5e7b5..aa818f2dc99 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/regex.go @@ -46,7 +46,7 @@ func builtinRegexMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te if err != nil { return err } - return iter(ast.BooleanTerm(re.Match([]byte(s2)))) + return iter(ast.BooleanTerm(re.MatchString(string(s2)))) } func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/subset.go b/vendor/github.com/open-policy-agent/opa/topdown/subset.go index 19c6571c024..7b152a5ef97 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/subset.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/subset.go @@ -193,12 +193,12 @@ func arraySubset(super, sub *ast.Array) bool { return true } - if superCursor == super.Len() { + if superCursor+subCursor == super.Len() { return false } subElem := sub.Elem(subCursor) - superElem := sub.Elem(superCursor + subCursor) + superElem := super.Elem(superCursor + subCursor) if superElem == nil { return false } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/time.go b/vendor/github.com/open-policy-agent/opa/topdown/time.go index f881ab94b2f..ba3efc75dc0 100644 --- 
a/vendor/github.com/open-policy-agent/opa/topdown/time.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/time.go @@ -12,6 +12,7 @@ import ( "strconv" "sync" "time" + _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/topdown/builtins" @@ -49,7 +50,13 @@ func builtinTimeParseNanos(_ BuiltinContext, operands []*ast.Term, iter func(*as return err } - result, err := time.Parse(string(format), string(value)) + formatStr := string(format) + // look for the formatStr in our acceptedTimeFormats and + // use the constant instead if it matches + if f, ok := acceptedTimeFormats[formatStr]; ok { + formatStr = f + } + result, err := time.Parse(formatStr, string(value)) if err != nil { return err } @@ -82,6 +89,20 @@ func builtinParseDurationNanos(_ BuiltinContext, operands []*ast.Term, iter func return iter(ast.NumberTerm(int64ToJSONNumber(int64(value)))) } +// Represent exposed constants for formatting from the stdlib time pkg +var acceptedTimeFormats = map[string]string{ + "ANSIC": time.ANSIC, + "UnixDate": time.UnixDate, + "RubyDate": time.RubyDate, + "RFC822": time.RFC822, + "RFC822Z": time.RFC822Z, + "RFC850": time.RFC850, + "RFC1123": time.RFC1123, + "RFC1123Z": time.RFC1123Z, + "RFC3339": time.RFC3339, + "RFC3339Nano": time.RFC3339Nano, +} + func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { t, layout, err := tzTime(operands[0].Value) if err != nil { @@ -90,7 +111,12 @@ func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) // Using RFC3339Nano time formatting as default if layout == "" { layout = time.RFC3339Nano + } else if layoutStr, ok := acceptedTimeFormats[layout]; ok { + // if we can find a constant specified, use the constant + layout = layoutStr } + // otherwise try to treat the fmt string as a datetime fmt string + timestamp := t.Format(layout) return iter(ast.StringTerm(timestamp)) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go index 0134e820751..b69f3f2d24a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go @@ -1065,6 +1065,8 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func if constraints.iss != issVal { return iter(unverified) } + } else { + return iter(unverified) } } // RFC7159 4.1.3 aud diff --git a/vendor/github.com/open-policy-agent/opa/util/json.go b/vendor/github.com/open-policy-agent/opa/util/json.go index 283c496973b..9c87946162a 100644 --- a/vendor/github.com/open-policy-agent/opa/util/json.go +++ b/vendor/github.com/open-policy-agent/opa/util/json.go @@ -12,6 +12,8 @@ import ( "reflect" "github.com/ghodss/yaml" + + "github.com/open-policy-agent/opa/loader/extension" ) // UnmarshalJSON parses the JSON encoded data and stores the result in the value @@ -19,10 +21,17 @@ import ( // // This function is intended to be used in place of the standard json.Marshal // function when json.Number is required. 
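// Note for readers of this patch: when plain JSON decoding fails, the rewritten
// UnmarshalJSON below falls back to any handler registered for the ".json"
// loader extension (extension.FindExtension) instead of returning the decode
// error immediately.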
-func UnmarshalJSON(bs []byte, x interface{}) (err error) { +func UnmarshalJSON(bs []byte, x interface{}) error { + return unmarshalJSON(bs, x, true) +} + +func unmarshalJSON(bs []byte, x interface{}, ext bool) error { buf := bytes.NewBuffer(bs) decoder := NewJSONDecoder(buf) if err := decoder.Decode(x); err != nil { + if handler := extension.FindExtension(".json"); handler != nil && ext { + return handler(bs, x) + } return err } @@ -103,14 +112,18 @@ func Reference(x interface{}) *interface{} { return &x } -// Unmarshal decodes a YAML or JSON value into the specified type. +// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type. func Unmarshal(bs []byte, v interface{}) error { if json.Valid(bs) { - return UnmarshalJSON(bs, v) + return unmarshalJSON(bs, v, false) } - bs, err := yaml.YAMLToJSON(bs) - if err != nil { - return err + nbs, err := yaml.YAMLToJSON(bs) + if err == nil { + return unmarshalJSON(nbs, v, false) + } + // not json or yaml: try extensions + if handler := extension.FindExtension(".json"); handler != nil { + return handler(bs, v) } - return UnmarshalJSON(bs, v) + return err } diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/version/version.go index 81673f5e895..ec4a3b821de 100644 --- a/vendor/github.com/open-policy-agent/opa/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/version/version.go @@ -7,10 +7,11 @@ package version import ( "runtime" + "runtime/debug" ) // Version is the canonical version of OPA. -var Version = "0.51.0" +var Version = "0.54.0" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() @@ -25,3 +26,24 @@ var ( Timestamp = "" Hostname = "" ) + +func init() { + bi, ok := debug.ReadBuildInfo() + if !ok { + return + } + dirty := false + for _, s := range bi.Settings { + switch s.Key { + case "vcs.time": + Timestamp = s.Value + case "vcs.revision": + Vcs = s.Value + case "vcs.modified": + dirty = s.Value == "true" + } + } + if dirty { + Vcs = Vcs + "-dirty" + } +} diff --git a/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go b/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go index 8c02ca49d2a..aa6eec85562 100644 --- a/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go +++ b/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go @@ -6,28 +6,3 @@ // +build go1.18 package version - -import ( - "runtime/debug" -) - -func init() { - bi, ok := debug.ReadBuildInfo() - if !ok { - return - } - dirty := false - for _, s := range bi.Settings { - switch s.Key { - case "vcs.time": - Timestamp = s.Value - case "vcs.revision": - Vcs = s.Value - case "vcs.modified": - dirty = s.Value == "true" - } - } - if dirty { - Vcs = Vcs + "-dirty" - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 12331542dde..deedc2dfbe7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -18,12 +18,12 @@ import ( "sort" "strings" - "github.com/prometheus/client_golang/prometheus/internal" - "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus/internal" ) // Desc is the descriptor used by every Prometheus Metric. 
It is essentially diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 5b69965b25b..8d818afe90d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -401,7 +401,7 @@ type HistogramOpts struct { // Histogram by a Prometheus server with that feature enabled (requires // Prometheus v2.40+). Sparse buckets are exponential buckets covering // the whole float64 range (with the exception of the “zero” bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, + // NativeHistogramZeroThreshold below). From any one bucket to the next, // the width of the bucket grows by a constant // factor. NativeHistogramBucketFactor provides an upper bound for this // factor (exception see below). The smaller @@ -432,7 +432,7 @@ type HistogramOpts struct { // bucket. For best results, this should be close to a bucket // boundary. This is usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // DefNativeHistogramZeroThreshold is used as the threshold. To configure // a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero @@ -639,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if frac == 0.5 { key-- } - div := 1 << -schema - key = (key + div - 1) / div + offset := (1 << -schema) - 1 + key = (key + offset) >> -schema } if isInf { key++ @@ -817,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) { } } -// limitSparsebuckets applies a strategy to limit the number of populated sparse +// limitBuckets applies a strategy to limit the number of populated sparse // buckets. It's generally best effort, and there are situations where the // number can go higher (if even the lowest resolution isn't enough to reduce // the number sufficiently, or if the provided counts aren't fully updated yet diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index a4cc9810b07..09b8d2fbead 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -37,6 +37,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "sync" "time" @@ -47,9 +48,10 @@ import ( ) const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" + processStartTimeHeader = "Process-Start-Time-Unix" ) var gzipPool = sync.Pool{ @@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if !opts.ProcessStartTime.IsZero() { + rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) + } if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. 
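The promhttp hunk above makes the handler emit a Process-Start-Time-Unix response header whenever HandlerOpts.ProcessStartTime is set (the new field itself is documented in the next hunk). A sketch of how a caller might opt in; illustrative only, with time.Now() standing in for the real process start time and the usual net/http, time, prometheus and promhttp imports assumed:

	handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
		ProcessStartTime: time.Now(),
	})
	http.Handle("/metrics", handler)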
@@ -366,6 +371,14 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // ProcessStartTime allows setting process start timevalue that will be exposed + // with "Process-Start-Time-Unix" response header along with the metrics + // payload. This allow callers to have efficient transformations to cumulative + // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per + // scrape target. + // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus + // exposition format. + ProcessStartTime time.Time } // gzipAccepted returns whether the client will accept gzip-encoded content. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 386fb2d23e2..f0d0015a0ff 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,6 +20,24 @@ import ( "github.com/prometheus/common/model" ) +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} + +func getLabelsFromPool() Labels { + return labelsPool.Get().(Labels) +} + +func putLabelsToPool(labels Labels) { + for k := range labels { + delete(labels, k) + } + + labelsPool.Put(labels) +} + // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -93,6 +111,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return false @@ -109,6 +129,8 @@ func (m *MetricVec) Delete(labels Labels) bool { // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + return m.metricMap.deleteByLabels(labels, m.curry) } @@ -229,6 +251,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // for example GaugeVec. 
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -647,15 +671,16 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedValues := make(Labels, len(labels)) + constrainedLabels := getLabelsFromPool() for l, v := range labels { if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - constrainedValues[l] = desc.variableLabels[i].Constrain(v) - continue + v = desc.variableLabels[i].Constrain(v) } - constrainedValues[l] = v + + constrainedLabels[l] = v } - return constrainedValues + + return constrainedLabels } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index e358db69c5d..b111d256200 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 +GOLANGCI_LINT_VERSION ?= v1.51.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -205,7 +207,7 @@ common-tarball: promu .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 0102ab0fd85..60c551e026b 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -21,6 +21,7 @@ import ( // kernel data structures. type FS struct { proc fs.FS + real bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) { if err != nil { return FS{}, err } - return FS{fs}, nil + + real, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, real}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 00000000000..80057696896 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build netbsd || openbsd || solaris || windows +// +build netbsd openbsd solaris windows + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct +func isRealProc(mountPoint string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 00000000000..6233217ad29 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !netbsd && !openbsd && !solaris && !windows +// +build !netbsd,!openbsd,!solaris,!windows + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem. +func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index b030951faf9..14272dc7885 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) { return us, nil } +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + // ReadUintFromFile reads a file and attempts to parse a uint64 from it. func ReadUintFromFile(path string) (uint64, error) { data, err := os.ReadFile(path) diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 0c482c18ccf..7f68890cff1 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -186,6 +186,8 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 + // The average time from the point the client sends RPC requests until it receives the response. + AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], @@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } + if ns[0] != 0 { + opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) + } if len(ns) > 8 { opStats.Errors = ns[8] diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 8300daca054..64a0e946068 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "strconv" "strings" "github.com/prometheus/procfs/internal/util" @@ -28,9 +27,13 @@ import ( // and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 + Searched uint64 Found uint64 + New uint64 Invalid uint64 Ignore uint64 + Delete uint64 + DeleteList uint64 Insert uint64 InsertFailed uint64 Drop uint64 @@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { // Parses a ConntrackStatEntry from given array of fields. 
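fs_statfs_type.go decides whether the configured mount point is a real procfs by comparing the filesystem magic returned by statfs(2) against PROC_SUPER_MAGIC (0x9fa0), with a build-tagged stub for platforms that lack the Type field. A Linux-only sketch of that check, with the constant value taken from the hunk above:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

const procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC from linux/magic.h

// isRealProc reports whether mountPoint is backed by a real proc filesystem.
func isRealProc(mountPoint string) (bool, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(mountPoint, &stat); err != nil {
		return false, err
	}
	return stat.Type == procSuperMagic, nil
}

func main() {
	ok, err := isRealProc("/proc")
	fmt.Println(ok, err)
}
```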
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) + entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err) } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries) } - entry.InsertFailed = insertFailed - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], } - entry.Drop = drop - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err + // Ignore missing search_restart on Linux < 2.6.35. + if numEntries == 17 { + stats.SearchRestart = *entries[16] } - entry.EarlyDrop = earlyDrop - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string. -func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err + return stats, nil } diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 06b7b8f2163..540cea52c6f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -76,6 +76,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { s := bufio.NewScanner(r) var stats []SoftnetStat + cpuIndex := 0 for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) @@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { softnetStat.SoftnetBacklogLen = us[0] softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number. 
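The rewritten parseConntrackStatEntry delegates all field parsing to util.ParseHexUint64s, checks that 16 or 17 fields were found (search_restart is absent on kernels older than 2.6.35), and then fills the struct from the slice. A simplified, self-contained sketch of that approach, with a trimmed struct rather than the full vendored one:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHexUint64s converts whitespace-separated hexadecimal fields.
func parseHexUint64s(fields []string) ([]uint64, error) {
	out := make([]uint64, 0, len(fields))
	for _, f := range fields {
		v, err := strconv.ParseUint(f, 16, 64)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}

// entry holds a few of the conntrack counters for illustration.
type entry struct {
	Entries, Found, Invalid uint64
}

func main() {
	line := "000000a7 00000000 00000014 00000000 0000000d"
	vals, err := parseHexUint64s(strings.Fields(line))
	if err != nil || len(vals) < 5 {
		panic("unexpected input")
	}
	e := entry{Entries: vals[0], Found: vals[2], Invalid: vals[4]}
	fmt.Printf("%+v\n", e)
}
```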
+ softnetStat.Index = uint32(cpuIndex) } softnetStat.Width = width stats = append(stats, softnetStat) + cpuIndex++ } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 00000000000..c80fb154247 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. +func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse wireless: %w", err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. 
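The new net_wireless.go adds a Wireless struct plus an FS.Wireless() accessor that reads and parses /proc/net/wireless. A usage sketch, assuming a procfs release that ships this method (as vendored above) and a machine that actually has wireless interfaces:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually "/proc"
	if err != nil {
		log.Fatal(err)
	}

	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err) // e.g. file missing on hosts without wireless
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise)
	}
}
```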
+ if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("invalid status in line %d: %q", n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err) + } + + dnwid, err := strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err) + } + + return interfaces, nil +} diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 5cc40aef55b..742dff453ba 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,7 +15,6 @@ package procfs import ( "bufio" - "io" "os" "path/filepath" "strconv" @@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) { var netStatsTotal []NetStat for _, filePath := range statFiles { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - procNetstat, err := parseNetstat(file) + procNetstat, err := parseNetstat(filePath) if err != nil { return nil, err } @@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) { // parseNetstat parses the metrics from `/proc/net/stat/` file // and returns a NetStat structure. 
-func parseNetstat(r io.Reader) (NetStat, error) { - var ( - scanner = bufio.NewScanner(r) - netStat = NetStat{ - Stats: make(map[string][]uint64), - } - ) +func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + scanner := bufio.NewScanner(file) scanner.Scan() // First string is always a header for stats diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index c30223af72a..48f39dafd2a 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -21,7 +21,6 @@ import ( "strconv" "strings" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -30,7 +29,7 @@ type Proc struct { // The process ID. PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. @@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. @@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil @@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // FileDescriptorsLen returns the number of currently open file descriptors of // a process. func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.real { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + fds, err := p.fileDescriptors() if err != nil { return 0, err @@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { } func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } // FileDescriptorsInfo retrieves information about all file descriptors of diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index b278eb2c2df..14b249f4fc6 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,7 +18,6 @@ import ( "fmt" "os" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -112,7 +111,7 @@ type ProcStat struct { // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 - proc fs.FS + proc FS } // NewStat returns the current status information of the process. @@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. 
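FileDescriptorsLen gains a fast path: when the FS is a real procfs on Linux 6.2 or newer, stat(2) on /proc/<pid>/fd reports the number of open descriptors in the size field, so the directory does not have to be read. A rough standalone sketch of the same idea; the kernel behaviour is the assumption here, older kernels report size 0 and fall back to listing, and the vendored code additionally gates this on the "real procfs" flag shown earlier:

```go
package main

import (
	"fmt"
	"os"
)

// fdCount returns the number of open file descriptors of pid.
func fdCount(pid int) (int, error) {
	dir := fmt.Sprintf("/proc/%d/fd", pid)

	// Fast path: Linux >= 6.2 exposes the count as the directory size.
	if st, err := os.Stat(dir); err == nil && st.Size() > 0 {
		return int(st.Size()), nil
	}

	// Fallback: count the entries the slow way.
	entries, err := os.ReadDir(dir)
	if err != nil {
		return 0, err
	}
	return len(entries), nil
}

func main() {
	n, err := fdCount(os.Getpid())
	fmt.Println(n, err)
}
```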
func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.proc.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 3d8c06439a9..c055d075db0 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "sort" "strconv" "strings" @@ -76,6 +77,9 @@ type ProcStatus struct { UIDs [4]string // GIDs of the process (Real, effective, saved set, and filesystem GIDs) GIDs [4]string + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 } // NewStatus returns the current status information of the process. @@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.VoluntaryCtxtSwitches = vUint case "nonvoluntary_ctxt_switches": s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + s.CpusAllowedList = calcCpusAllowedList(vString) } + } // TotalCtxtSwitches returns the total context switch. func (s ProcStatus) TotalCtxtSwitches() uint64 { return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches } + +func calcCpusAllowedList(cpuString string) []uint64 { + s := strings.Split(cpuString, ",") + + var g []uint64 + + for _, cpu := range s { + // parse cpu ranges, example: 1-3=[1,2,3] + if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { + startCPU, _ := strconv.ParseUint(l[0], 10, 64) + endCPU, _ := strconv.ParseUint(l[1], 10, 64) + + for i := startCPU; i <= endCPU; i++ { + g = append(g, i) + } + } else if len(l) == 1 { + cpu, _ := strconv.ParseUint(l[0], 10, 64) + g = append(g, cpu) + } + + } + + sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + return g +} diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index f08bfc769db..490c14708d4 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) { if err != nil { continue } - t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}}) } return t, nil @@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil } // Thread returns a process for a given TID of Proc. func (proc Proc) Thread(tid int) (Proc, error) { - tfs := fsi.FS(proc.path("task")) - if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.real} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } return Proc{PID: tid, fs: tfs}, nil diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896f25..d1d4a85fd75 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). 
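proc_status.go now parses the Cpus_allowed_list line ("0-2,4" style ranges) into a sorted slice of CPU numbers. A compact sketch of the same range expansion; parse errors are ignored here just as in the hunk above:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// expandCPUList turns a cpuset list such as "0-2,4" into [0 1 2 4].
func expandCPUList(s string) []uint64 {
	var cpus []uint64
	for _, part := range strings.Split(s, ",") {
		bounds := strings.Split(strings.TrimSpace(part), "-")
		if len(bounds) == 2 {
			start, _ := strconv.ParseUint(bounds[0], 10, 64)
			end, _ := strconv.ParseUint(bounds[1], 10, 64)
			for i := start; i <= end; i++ {
				cpus = append(cpus, i)
			}
		} else if len(bounds) == 1 {
			cpu, _ := strconv.ParseUint(bounds[0], 10, 64)
			cpus = append(cpus, cpu)
		}
	}
	sort.Slice(cpus, func(i, j int) bool { return cpus[i] < cpus[j] })
	return cpus
}

func main() {
	fmt.Println(expandCPUList("0-2,4")) // [0 1 2 4]
}
```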
I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1b65..074fd4b8bd7 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 439d3e1de4e..2578d94b5eb 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -1,4 +1,4 @@ -# Copyright 2013-2022 The Cobra Authors +# Copyright 2013-2023 The Cobra Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index c433a01bced..0da8d7aa08a 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint)) $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") endif -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest") -endif - .PHONY: fmt lint test install_deps clean default: all @@ -25,6 +21,10 @@ lint: test: install_deps $(info ******************** running tests ********************) + go test -v ./... + +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) richgo test -v ./... install_deps: diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 7cc726beb42..592c0b8ab05 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,4 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) +![cobra logo](assets/CobraMain.png) Cobra is a library for creating powerful modern CLI applications. @@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. 
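The logrus writer now guards against bufio.Scanner's "token too long" failure by installing a split function that emits at most 64 KB per token and otherwise defers to ScanLines. A minimal sketch of that split function on its own; the sample input is illustrative:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	const chunkSize = bufio.MaxScanTokenSize // 64 KB

	scanner := bufio.NewScanner(strings.NewReader("line one\nline two\n"))
	scanner.Buffer(make([]byte, chunkSize), chunkSize)

	// Emit whole lines, but never a token larger than chunkSize.
	scanner.Split(func(data []byte, atEOF bool) (int, []byte, error) {
		if len(data) >= chunkSize {
			return chunkSize, data[:chunkSize], nil
		}
		return bufio.ScanLines(data, atEOF)
	})

	for scanner.Scan() {
		fmt.Println(strings.TrimRight(scanner.Text(), "\r\n"))
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```

In practice this loop sits behind (*Entry).WriterLevel, which is commonly used to redirect other libraries' output into logrus, for example log.SetOutput(logger.WriterLevel(logrus.WarnLevel)).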
[This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 95e03aecb60..2d0239437a8 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 2c1f99e7870..e79ec33a81d 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( type PositionalArgs func(cmd *Command, args []string) error -// Legacy arg validation has the following behaviour: +// legacyArgs validation has the following behaviour: // - root commands with no subcommands can take arbitrary arguments // - root commands with subcommands will do subcommand validity checking // - subcommands will always accept arbitrary arguments diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 3acdb27974e..10c78847de2 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { } } -// Setup annotations for go completions for registered flags +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { flagCompletionMutex.RLock() defer flagCompletionMutex.RUnlock() diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index bb4b71892c2..19b09560c1e 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) { __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -65,7 +65,7 @@ __%[1]s_get_completion_results() { lastChar=${lastParam:$((${#lastParam}-1)):1} __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + if [[ -z ${cur} && ${lastChar} != = ]]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" @@ -75,7 +75,7 @@ __%[1]s_get_completion_results() { # When completing a flag with an = (e.g., %[1]s -n=) # bash focuses on the part after the =, so we need to remove # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then + if [[ ${cur} == -*=* ]]; then cur="${cur#*=}" fi @@ -87,7 +87,7 @@ __%[1]s_get_completion_results() { directive=${out##*:} # Remove the directive out=${out%%:*} - if [ "${directive}" = "${out}" ]; then + if [[ ${directive} == "${out}" ]]; then # There is not directive specified directive=0 fi @@ -101,22 +101,36 @@ __%[1]s_process_completion_results() { local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + if (((directive & shellCompDirectiveError) != 0)); then # Error code. No completion. 
__%[1]s_debug "Received error from custom completion go code" return else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no space" compopt -o nospace else __%[1]s_debug "No space directive not supported in this version of bash" fi fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no file completion" compopt +o default else @@ -130,7 +144,7 @@ __%[1]s_process_completion_results() { local activeHelp=() __%[1]s_extract_activeHelp - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering local fullFilter filter filteringCmd @@ -143,13 +157,12 @@ __%[1]s_process_completion_results() { filteringCmd="_filedir $fullFilter" __%[1]s_debug "File filtering command: $filteringCmd" $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then # File completion for directories only - # Use printf to strip any trailing newline local subdir - subdir=$(printf "%%s" "${completions[0]}") - if [ -n "$subdir" ]; then + subdir=${completions[0]} + if [[ -n $subdir ]]; then __%[1]s_debug "Listing directories in $subdir" pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return else @@ -164,7 +177,7 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish - if [ ${#activeHelp[*]} -ne 0 ]; then + if ((${#activeHelp[*]} != 0)); then printf "\n"; printf "%%s\n" "${activeHelp[@]}" printf "\n" @@ -184,21 +197,21 @@ __%[1]s_process_completion_results() { # Separate activeHelp lines from real completions. # Fills the $activeHelp and $completions arrays. 
__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do - if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" - if [ -n "$comp" ]; then + if [[ -n $comp ]]; then activeHelp+=("$comp") fi else # Not an activeHelp line but a normal completion completions+=("$comp") fi - done < <(printf "%%s\n" "${out}") + done <<<"${out}" } __%[1]s_handle_completion_types() { @@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() { done < <(printf "%%s\n" "${completions[@]}") # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then + if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" comp="${COMPREPLY[0]%%%%$tab*}" __%[1]s_debug "Removed description from single completion, which is now: ${comp}" @@ -271,8 +284,8 @@ __%[1]s_handle_special_char() if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then local word=${comp%%"${comp##*${char}}"} local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} done fi } @@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions() # Make sure we can fit a description of at least 8 characters # if we are to align the descriptions. - if [[ $maxdesclength -gt 8 ]]; then + if ((maxdesclength > 8)); then # Add the proper number of spaces to align the descriptions for ((i = ${#comp} ; i < longest ; i++)); do comp+=" " @@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions() # If there is enough space for any description text, # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then desc=${desc:0:$(( maxdesclength - 1 ))} desc+="…" fi @@ -332,9 +345,9 @@ __start_%[1]s() # Call _init_completion from the bash-completion package # to prepare the arguments properly if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return + _init_completion -n =: || return else - __%[1]s_init_completion -n "=:" || return + __%[1]s_init_completion -n =: || return fi __%[1]s_debug @@ -361,7 +374,7 @@ fi # ex: ts=4 sw=4 et filetype=sh `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index fe44bc8a07e..b07b44a0ce2 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string { // rpad adds padding to the right of a string. 
func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) } // tmpl executes the given template text on data, writing the result to w. diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 6ff47dd5c35..01f7c6f1c5e 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist -// Structure to manage groups for commands +// Group Structure to manage groups for commands type Group struct { ID string Title string @@ -47,7 +47,7 @@ type Group struct { // definition to ensure usability. type Command struct { // Use is the one-line usage message. - // Recommended syntax is as follow: + // Recommended syntax is as follows: // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. // ... indicates that you can specify multiple values for the previous argument. // | indicates mutually exclusive information. You can use the argument to the left of the separator or the @@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// SetHelpCommandGroup sets the group id of the help command. +// SetHelpCommandGroupID sets the group id of the help command. func (c *Command) SetHelpCommandGroupID(groupID string) { if c.helpCommand != nil { c.helpCommand.GroupID = groupID @@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) { c.helpCommandGroupID = groupID } -// SetCompletionCommandGroup sets the group id of the completion command. +// SetCompletionCommandGroupID sets the group id of the completion command. func (c *Command) SetCompletionCommandGroupID(groupID string) { // completionCommandGroupID is used if no completion command is defined by the user c.Root().completionCommandGroupID = groupID @@ -655,20 +655,44 @@ Loop: // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. 
Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } } } return args } func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || + return ((len(arg) >= 3 && arg[0:2] == "--") || (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) } @@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) { cmd := c.findNext(nextSubCmd) if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) } return c, innerArgs } @@ -1272,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool { return true } -// ContainGroups return if groupID exists in the list of command groups. +// ContainsGroup return if groupID exists in the list of command groups. func (c *Command) ContainsGroup(groupID string) bool { for _, x := range c.commandgroups { if x.ID == groupID { diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 2b77f8f0190..307f0c127fd 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 520f23abf09..adbef395c25 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index e8a0206db10..ee38c4d0b86 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,10 @@ const ( // obtain the same behavior but only for flags. ShellCompDirectiveFilterDirs + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + // =========================================================================== // All directives using iota should be above this one. 
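command.go rewrites argsMinusFirstX so that stripping the first occurrence of a subcommand name can no longer swallow a flag value: the scan stops at the "--" terminator and jumps over the argument that follows a value-taking flag. A simplified sketch of that scan, under the assumption that every "-f"/"--flag" without "=" takes a value; the vendored code instead consults the command's flag set for that:

```go
package main

import (
	"fmt"
	"strings"
)

// removeFirst removes the first occurrence of name from args, skipping
// flag values and stopping at the "--" terminator.
func removeFirst(args []string, name string) []string {
	for pos := 0; pos < len(args); pos++ {
		s := args[pos]
		switch {
		case s == "--":
			return args // nothing parseable after this point
		case strings.HasPrefix(s, "-") && !strings.Contains(s, "="):
			pos++ // assume the next arg is this flag's value
		case s == name:
			return append(append([]string{}, args[:pos]...), args[pos+1:]...)
		}
	}
	return args
}

func main() {
	args := []string{"--user", "admin", "admin", "my-user"}
	fmt.Println(removeFirst(args, "admin")) // [--user admin my-user]
}
```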
@@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string { if d&ShellCompDirectiveFilterDirs != 0 { directives = append(directives, "ShellCompDirectiveFilterDirs") } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } if len(directives) == 0 { directives = append(directives, "ShellCompDirectiveDefault") } @@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string { return strings.Join(directives, ", ") } -// Adds a special hidden command that can be used to request custom completions. +// initCompleteCmd adds a special hidden command that can be used to request custom completions. func (c *Command) initCompleteCmd(args []string) { completeCmd := &Command{ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), @@ -727,7 +734,7 @@ to enable it. You can execute the following once: To load completions in your current shell session: - source <(%[1]s completion zsh); compdef _%[1]s %[1]s + source <(%[1]s completion zsh) To load completions for every new session, execute once: diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 97112a17b29..12ca0d2b11c 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -53,7 +53,7 @@ function __%[1]s_perform_completion __%[1]s_debug "last arg: $lastArg" # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" __%[1]s_debug "Calling $requestComp" set -l results (eval $requestComp 2> /dev/null) @@ -89,6 +89,60 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub 
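completions.go introduces ShellCompDirectiveKeepOrder, which tells the generated completion scripts not to re-sort the suggestions. A hedged usage sketch for a ValidArgsFunction, assuming a cobra version that exports the new directive, as the hunks above add:

```go
package main

import "github.com/spf13/cobra"

func newDeployCmd() *cobra.Command {
	return &cobra.Command{
		Use: "deploy [environment]",
		// Offer environments in promotion order and ask the shell
		// to keep that order instead of sorting alphabetically.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"dev", "staging", "prod"},
				cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
}

func main() {
	root := &cobra.Command{Use: "app"}
	root.AddCommand(newDeployCmd())
	_ = root.Execute()
}
```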
--start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + # This function does two things: # - Obtain the completions and store them in the global __%[1]s_comp_results # - Return false if file completion should be performed @@ -99,17 +153,17 @@ function __%[1]s_prepare_completions # Start fresh set --erase __%[1]s_comp_results - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" - if test -z "$results" + if test -z "$__%[1]s_perform_completion_once_result" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps return 1 end - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" @@ -205,13 +259,17 @@ end # Remove any pre-existing completions for the program since we will be handling all of them. complete -c %[2]s -e +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' # The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results # which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } // GenFishCompletion generates fish completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 9c377aaf9c9..b35fde15548 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 004de42e41e..177d2755f21 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars { $ShellCompDirectiveNoFileComp=%[6]d $ShellCompDirectiveFilterFileExt=%[7]d $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. @@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars { # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } } __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[9]s=0 + $env:%[10]s=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } $Longest = 0 - $Values = $Out | ForEach-Object { + [Array]$Values = $Out | ForEach-Object { #Split the output in name and description `+" $Name, $Description = $_.Split(\"`t\",2)"+` __%[1]s_debug "Name: $Name Description: $Description" @@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars { } } + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars { Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md 
b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 6865f88e79a..8a291eb20e8 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -1,11 +1,13 @@ ## Projects using Cobra - [Allero](https://github.com/allero-io/allero) +- [Arewefastyet](https://benchmark.vitess.io) - [Arduino CLI](https://github.com/arduino/arduino-cli) - [Bleve](https://blevesearch.com/) - [Cilium](https://cilium.io/) - [CloudQuery](https://github.com/cloudquery/cloudquery) - [CockroachDB](https://www.cockroachlabs.com/) +- [Constellation](https://github.com/edgelesssys/constellation) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) - [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) @@ -25,7 +27,7 @@ - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/armosec/kubescape) +- [Kubescape](https://github.com/kubescape/kubescape) - [KubeVirt](https://github.com/kubevirt/kubevirt) - [Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) @@ -51,10 +53,12 @@ - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) - [Scaleway CLI](https://github.com/scaleway/scaleway-cli) +- [Sia](https://github.com/SiaFoundation/siad) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- [Vitess](https://vitess.io) - VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) - [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go index 126e83c307e..b035742d399 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index 553ee5df8a7..065c0621d4c 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -71,7 +71,7 @@ PowerShell: `,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": @@ -162,16 +162,7 @@ cmd := &cobra.Command{ } ``` -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. +The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. 
### Dynamic completion of nouns @@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt // return []string{"themes"}, ShellCompDirectiveFilterDirs // ShellCompDirectiveFilterDirs + +// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order +// in which the completions are provided +ShellCompDirectiveKeepOrder ``` ***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. @@ -385,6 +380,19 @@ or ```go ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} ``` + +If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: + +```bash +$ source <(helm completion bash) +$ helm completion [tab][tab] +bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) +fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) + +$ source <(helm completion bash --no-descriptions) +$ helm completion [tab][tab] +bash fish powershell zsh +``` ## Bash completions ### Dependencies diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md index e55367e853f..85201d840c8 100644 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{ } ``` +### Organizing subcommands + +A command may have subcommands which in turn may have other subcommands. This is achieved by using +`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in +its own go package. + +The suggested approach is for the parent command to use `AddCommand` to add its most immediate +subcommands. For example, consider the following directory structure: + +```text +├── cmd +│   ├── root.go +│   └── sub1 +│   ├── sub1.go +│   └── sub2 +│   ├── leafA.go +│   ├── leafB.go +│   └── sub2.go +└── main.go +``` + +In this case: + +* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. +* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. +* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the + sub2 command. + +This approach ensures the subcommands are always included at compile time while avoiding cyclic +references. + ### Returning and handling errors If you wish to return an error to the caller of a command, `RunE` can be used. 
@@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password") You can also prevent different flags from being provided together if they represent mutually exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: ```go -rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") +rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") ``` @@ -349,7 +380,7 @@ shown below: ```go var cmd = &cobra.Command{ Short: "hello", - Args: MatchAll(ExactArgs(2), OnlyValidArgs), + Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { fmt.Println("Hello, World!") }, diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 84cec76fde3..1856e4c7f68 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd = ShellCompNoDescRequestCmd } WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- @@ -108,8 +109,9 @@ _%[1]s() local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -177,7 +179,7 @@ _%[1]s() return fi - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} local startIndex=$((${#activeHelpMarker}+1)) local hasActiveHelp=0 @@ -227,6 +229,11 @@ _%[1]s() noSpace="-S ''" fi + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." 
+ keepOrder="-V" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -262,7 +269,7 @@ _%[1]s() return $result else __%[1]s_debug "Calling _describe" - if eval _describe "completions" completions $flagPrefix $noSpace; then + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then __%[1]s_debug "_describe found some completions" # Return the success of having called _describe @@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d86..95d8e59da69 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -352,9 +352,9 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -364,10 +364,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -377,9 +377,9 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -389,10 +389,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -402,8 +402,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) 
-// assert.Positive(t, 1.23) +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -414,8 +414,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790b..7880b8f9433 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -22,9 +22,9 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -56,7 +56,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -66,7 +66,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// assert.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -81,8 +81,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -90,27 +90,10 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) 
} -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,10 +103,10 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -143,8 +126,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -164,7 +147,7 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -172,34 +155,9 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) } -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. 
-// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -225,7 +183,7 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{} // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -244,9 +202,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -256,10 +214,10 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -270,7 +228,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // HTTPBodyContainsf asserts 
that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -283,7 +241,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -295,7 +253,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -307,7 +265,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -319,7 +277,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { @@ -331,7 +289,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). 
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -343,7 +301,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -353,7 +311,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDeltaf asserts that the two numerals are within delta of each other. // -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -395,9 +353,9 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -407,9 +365,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -419,9 +377,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -431,9 +389,9 @@ func IsNonDecreasingf(t TestingT, object 
interface{}, msg string, args ...interf // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -451,7 +409,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -462,7 +420,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -472,9 +430,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -484,10 +442,10 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -497,8 +455,8 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
// Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -509,7 +467,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -519,7 +477,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nilf asserts that the specified object is nil. // -// assert.Nilf(t, err, "error message %s", "formatted") +// assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -538,10 +496,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -561,9 +519,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) boo // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -574,9 +532,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -586,7 +544,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -599,7 +557,7 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -618,7 +576,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -628,7 +586,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -638,8 +596,8 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -649,7 +607,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -663,7 +621,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // NotSubsetf asserts that the specified list(array, slice...) 
contains not all // elements given in the specified subset(array, slice...). // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -681,7 +639,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -693,7 +651,7 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -704,7 +662,7 @@ func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -714,8 +672,8 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -725,8 +683,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -736,7 +694,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Samef asserts that two pointers reference the same object. 
// -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -750,7 +708,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -760,7 +718,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -770,7 +728,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -780,7 +738,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53c..339515b8bfb 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -30,9 +30,9 @@ func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{} // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -43,9 +43,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. // Containsf asserts that the specified string, list(array, slice...) 
or map contains the // specified substring or element. // -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -98,7 +98,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Empty(obj) +// a.Empty(obj) func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -109,7 +109,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Emptyf(obj, "error message %s", "formatted") +// a.Emptyf(obj, "error message %s", "formatted") func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -119,7 +119,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) // Equal asserts that two objects are equal. // -// a.Equal(123, 123) +// a.Equal(123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -134,8 +134,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -146,8 +146,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -155,44 +155,10 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a return EqualErrorf(a.t, theError, errString, msg, args...) } -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. 
-// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true -// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false -func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualExportedValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualExportedValuesf(a.t, expected, actual, msg, args...) -} - // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValues(uint32(123), int32(123)) +// a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -203,7 +169,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") +// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -213,7 +179,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg // Equalf asserts that two objects are equal. // -// a.Equalf(123, 123, "error message %s", "formatted") +// a.Equalf(123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -227,10 +193,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -259,8 +225,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContains(err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -271,8 +237,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . 
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -300,10 +266,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -314,7 +280,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -322,60 +288,10 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) } -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithT(func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). 
-// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) -} - // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -385,7 +301,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t // Exactly asserts that two objects are equal in value and type. // -// a.Exactly(int32(123), int64(123)) +// a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -395,7 +311,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. // -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") +// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -437,7 +353,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ // False asserts that the specified value is false. // -// a.False(myBool) +// a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -447,7 +363,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { // Falsef asserts that the specified value is false. 
// -// a.Falsef(myBool, "error message %s", "formatted") +// a.Falsef(myBool, "error message %s", "formatted") func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -475,9 +391,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b // Greater asserts that the first element is greater than the second // -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -487,10 +403,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -500,10 +416,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -513,9 +429,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") -// a.Greaterf("b", "a", "error message %s", "formatted") +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -526,7 +442,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -539,7 +455,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -552,7 +468,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -565,7 +481,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -577,7 +493,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin // HTTPError asserts that a specified handler returns an error status code. // -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -589,7 +505,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // HTTPErrorf asserts that a specified handler returns an error status code. // -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -601,7 +517,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str // HTTPRedirect asserts that a specified handler returns a redirect status code. 
// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -613,7 +529,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -625,7 +541,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) +// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { @@ -637,7 +553,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { @@ -649,7 +565,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur // HTTPSuccess asserts that a specified handler returns a success status code. // -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -661,7 +577,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st // HTTPSuccessf asserts that a specified handler returns a success status code. // -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -673,7 +589,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s // Implements asserts that an object is implemented by the specified interface. 
// -// a.Implements((*MyInterface)(nil), new(MyObject)) +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -683,7 +599,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -693,7 +609,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, 22/7.0, 0.01) +// a.InDelta(math.Pi, 22/7.0, 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -735,7 +651,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -777,9 +693,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo // IsDecreasing asserts that the collection is decreasing // -// a.IsDecreasing([]int{2, 1, 0}) -// a.IsDecreasing([]float{2, 1}) -// a.IsDecreasing([]string{"b", "a"}) +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -789,9 +705,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) // IsDecreasingf asserts that the collection is decreasing // -// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") -// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -801,9 +717,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter // IsIncreasing asserts that the collection is increasing // -// a.IsIncreasing([]int{1, 2, 3}) -// a.IsIncreasing([]float{1, 2}) -// a.IsIncreasing([]string{"a", "b"}) +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -813,9 +729,9 @@ func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) // IsIncreasingf 
asserts that the collection is increasing // -// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") -// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -825,9 +741,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter // IsNonDecreasing asserts that the collection is not decreasing // -// a.IsNonDecreasing([]int{1, 1, 2}) -// a.IsNonDecreasing([]float{1, 2}) -// a.IsNonDecreasing([]string{"a", "b"}) +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -837,9 +753,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface // IsNonDecreasingf asserts that the collection is not decreasing // -// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -849,9 +765,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in // IsNonIncreasing asserts that the collection is not increasing // -// a.IsNonIncreasing([]int{2, 1, 1}) -// a.IsNonIncreasing([]float{2, 1}) -// a.IsNonIncreasing([]string{"b", "a"}) +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -861,9 +777,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface // IsNonIncreasingf asserts that the collection is not increasing // -// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -889,7 +805,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s // JSONEq asserts that two JSON strings are equivalent. 
// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -899,7 +815,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf // JSONEqf asserts that two JSON strings are equivalent. // -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -910,7 +826,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// a.Len(mySlice, 3) +// a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -921,7 +837,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// a.Lenf(mySlice, 3, "error message %s", "formatted") +// a.Lenf(mySlice, 3, "error message %s", "formatted") func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -931,9 +847,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in // Less asserts that the first element is less than the second // -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -943,10 +859,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac // LessOrEqual asserts that the first element is less than or equal to the second // -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -956,10 +872,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i // LessOrEqualf asserts that the first element is less than or equal to the second // -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() 
@@ -969,9 +885,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") -// a.Lessf("a", "b", "error message %s", "formatted") +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -981,8 +897,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i // Negative asserts that the specified element is negative // -// a.Negative(-1) -// a.Negative(-1.23) +// a.Negative(-1) +// a.Negative(-1.23) func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -992,8 +908,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { // Negativef asserts that the specified element is negative // -// a.Negativef(-1, "error message %s", "formatted") -// a.Negativef(-1.23, "error message %s", "formatted") +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1004,7 +920,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) b // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) +// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1015,7 +931,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1025,7 +941,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t // Nil asserts that the specified object is nil. // -// a.Nil(err) +// a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1035,7 +951,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { // Nilf asserts that the specified object is nil. 
// -// a.Nilf(err, "error message %s", "formatted") +// a.Nilf(err, "error message %s", "formatted") func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1063,10 +979,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1076,10 +992,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1108,9 +1024,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1121,9 +1037,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1134,9 +1050,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1147,9 +1063,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo // NotEmptyf asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1159,7 +1075,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface // NotEqual asserts that the specified values are NOT equal. // -// a.NotEqual(obj1, obj2) +// a.NotEqual(obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1172,7 +1088,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValues(obj1, obj2) +// a.NotEqualValues(obj1, obj2) func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1182,7 +1098,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1192,7 +1108,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m // NotEqualf asserts that the specified values are NOT equal. // -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1223,7 +1139,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in // NotNil asserts that the specified object is not nil. // -// a.NotNil(err) +// a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1233,7 +1149,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool // NotNilf asserts that the specified object is not nil. // -// a.NotNilf(err, "error message %s", "formatted") +// a.NotNilf(err, "error message %s", "formatted") func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1243,7 +1159,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanics(func(){ RemainCalm() }) +// a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1253,7 +1169,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1263,8 +1179,8 @@ func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{} // NotRegexp asserts that a specified regexp does not match a string. // -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1274,8 +1190,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. // -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1285,7 +1201,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg // NotSame asserts that two pointers do not reference the same object. // -// a.NotSame(ptr1, ptr2) +// a.NotSame(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1298,7 +1214,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg // NotSamef asserts that two pointers do not reference the same object. // -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") +// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1312,7 +1228,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1323,7 +1239,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). 
// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1349,7 +1265,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bo // Panics asserts that the code inside the specified PanicTestFunc panics. // -// a.Panics(func(){ GoCrazy() }) +// a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1361,7 +1277,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1373,7 +1289,7 @@ func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndAr // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1384,7 +1300,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg str // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1395,7 +1311,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgA // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1405,7 +1321,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1415,8 +1331,8 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b // Positive asserts that the specified element is positive // -// a.Positive(1) -// a.Positive(1.23) +// a.Positive(1) +// a.Positive(1.23) func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1426,8 +1342,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { // Positivef asserts that the specified element is positive // -// a.Positivef(1, "error message %s", "formatted") -// a.Positivef(1.23, "error message %s", "formatted") +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1437,8 +1353,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) b // Regexp asserts that a specified regexp matches a string. // -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1448,8 +1364,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. // -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1459,7 +1375,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . // Same asserts that two pointers reference the same object. // -// a.Same(ptr1, ptr2) +// a.Same(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1472,7 +1388,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . // Samef asserts that two pointers reference the same object. // -// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// a.Samef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1486,7 +1402,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1497,7 +1413,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1507,7 +1423,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a // True asserts that the specified value is true. // -// a.True(myBool) +// a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1517,7 +1433,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { // Truef asserts that the specified value is true. // -// a.Truef(myBool, "error message %s", "formatted") +// a.Truef(myBool, "error message %s", "formatted") func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1527,7 +1443,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1537,7 +1453,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta // WithinDurationf asserts that the two times are within duration delta of each other. // -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1547,7 +1463,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta // WithinRange asserts that a time is within a time range (inclusive). // -// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,7 +1473,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim // WithinRangef asserts that a time is within a time range (inclusive). 
// -// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0599..75944878358 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -46,36 +46,36 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba926..2924cf3a149 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -75,77 +75,6 @@ func ObjectsAreEqual(expected, actual interface{}) bool { return bytes.Equal(exp, act) } -// copyExportedFields iterates downward through nested data structures and creates a copy -// that only contains the exported struct fields. 
-func copyExportedFields(expected interface{}) interface{} { - if isNil(expected) { - return expected - } - - expectedType := reflect.TypeOf(expected) - expectedKind := expectedType.Kind() - expectedValue := reflect.ValueOf(expected) - - switch expectedKind { - case reflect.Struct: - result := reflect.New(expectedType).Elem() - for i := 0; i < expectedType.NumField(); i++ { - field := expectedType.Field(i) - isExported := field.IsExported() - if isExported { - fieldValue := expectedValue.Field(i) - if isNil(fieldValue) || isNil(fieldValue.Interface()) { - continue - } - newValue := copyExportedFields(fieldValue.Interface()) - result.Field(i).Set(reflect.ValueOf(newValue)) - } - } - return result.Interface() - - case reflect.Ptr: - result := reflect.New(expectedType.Elem()) - unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface()) - result.Elem().Set(reflect.ValueOf(unexportedRemoved)) - return result.Interface() - - case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) - for i := 0; i < expectedValue.Len(); i++ { - index := expectedValue.Index(i) - if isNil(index) { - continue - } - unexportedRemoved := copyExportedFields(index.Interface()) - result.Index(i).Set(reflect.ValueOf(unexportedRemoved)) - } - return result.Interface() - - case reflect.Map: - result := reflect.MakeMap(expectedType) - for _, k := range expectedValue.MapKeys() { - index := expectedValue.MapIndex(k) - unexportedRemoved := copyExportedFields(index.Interface()) - result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved)) - } - return result.Interface() - - default: - return expected - } -} - -// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are -// considered equal. This comparison of only exported fields is applied recursively to nested data -// structures. -// -// This function does no assertion of any kind. -func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { - expectedCleaned := copyExportedFields(expected) - actualCleaned := copyExportedFields(actual) - return ObjectsAreEqualValues(expectedCleaned, actualCleaned) -} - // ObjectsAreEqualValues gets whether two objects are equal, or if their // values are equal. func ObjectsAreEqualValues(expected, actual interface{}) bool { @@ -342,7 +271,7 @@ type labeledContent struct { // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: // -// \t{{label}}:{{align_spaces}}\t{{content}}\n +// \t{{label}}:{{align_spaces}}\t{{content}}\n // // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this @@ -365,7 +294,7 @@ func labeledOutput(content ...labeledContent) string { // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -397,7 +326,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // Equal asserts that two objects are equal. 
// -// assert.Equal(t, 123, 123) +// assert.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -438,7 +367,7 @@ func validateEqualArgs(expected, actual interface{}) error { // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// assert.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -458,7 +387,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// assert.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -526,7 +455,7 @@ func truncatingFormat(data interface{}) string { // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -544,53 +473,9 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false -func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - - expected = copyExportedFields(expected) - actual = copyExportedFields(actual) - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true -} - // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -609,7 +494,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // NotNil asserts that the specified object is not nil. 
// -// assert.NotNil(t, err) +// assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if !isNil(object) { return true @@ -655,7 +540,7 @@ func isNil(object interface{}) bool { // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if isNil(object) { return true @@ -698,7 +583,7 @@ func isEmpty(object interface{}) bool { // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -715,9 +600,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := !isEmpty(object) if !pass { @@ -746,7 +631,7 @@ func getLen(x interface{}) (ok bool, length int) { // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3) +// assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -764,7 +649,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // True asserts that the specified value is true. // -// assert.True(t, myBool) +// assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { if !value { if h, ok := t.(tHelper); ok { @@ -779,7 +664,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { if value { if h, ok := t.(tHelper); ok { @@ -794,7 +679,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// assert.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -817,7 +702,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// assert.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -876,9 +761,9 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -899,9 +784,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -909,10 +794,10 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) ok, found := containsElement(s, contains) if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } if found { - return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) } return true @@ -922,7 +807,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -978,7 +863,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1163,7 +1048,7 @@ func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1179,7 +1064,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1200,7 +1085,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1220,7 +1105,7 @@ func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs . // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1235,7 +1120,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,7 +1136,7 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1310,7 +1195,7 @@ func toFloat(x interface{}) (float64, bool) { // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1483,10 +1368,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { if err != nil { if h, ok := t.(tHelper); ok { @@ -1500,10 +1385,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1518,8 +1403,8 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1541,8 +1426,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1575,8 +1460,8 @@ func matchRegexp(rx interface{}, str interface{}) bool { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1593,8 +1478,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1706,7 +1591,7 @@ func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1829,7 +1714,7 @@ type tHelper interface { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1859,93 +1744,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } } -// CollectT implements the TestingT interface and collects all errors. -type CollectT struct { - errors []error -} - -// Errorf collects the error. -func (c *CollectT) Errorf(format string, args ...interface{}) { - c.errors = append(c.errors, fmt.Errorf(format, args...)) -} - -// FailNow panics. -func (c *CollectT) FailNow() { - panic("Assertion failed") -} - -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil -} - -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } -} - -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - collect := new(CollectT) - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - collect.Copy(t) - return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - collect.Reset() - go func() { - condition(collect) - ch <- len(collect.errors) == 0 - }() - case v := <-ch: - if v { - return true - } - tick = ticker.C - } - } -} - // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. 
// -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d387..c9dccc4d6cd 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,40 +1,39 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // -// # Example Usage +// Example Usage // // The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) // -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) +// func TestSomething(t *testing.T) { // -// func TestSomething(t *testing.T) { +// var a string = "Hello" +// var b string = "Hello" // -// var a string = "Hello" -// var b string = "Hello" +// assert.Equal(t, a, b, "The two words should be the same.") // -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } +// } // // if you assert many times, use the format below: // -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) // -// func TestSomething(t *testing.T) { -// assert := assert.New(t) +// func TestSomething(t *testing.T) { +// assert := assert.New(t) // -// var a string = "Hello" -// var b string = "Hello" +// var a string = "Hello" +// var b string = "Hello" // -// assert.Equal(a, b, "The two words should be the same.") -// } +// assert.Equal(a, b, "The two words should be the same.") +// } // -// # Assertions +// Assertions // // Assertions allow you to easily write test code, and are global funcs in the `assert` package. // All assertion functions take, as the first argument, the `*testing.T` object provided by the diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a75..4ed341dd289 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -23,7 +23,7 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) ( // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -45,7 +45,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -67,7 +67,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -89,7 +89,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { @@ -124,7 +124,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -144,7 +144,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 96843472455..169de39221c 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -1,25 +1,24 @@ // Package require implements the same assertions as the `assert` package but // stops test execution when a test fails. 
// -// # Example Usage +// Example Usage // // The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) // -// import ( -// "testing" -// "github.com/stretchr/testify/require" -// ) +// func TestSomething(t *testing.T) { // -// func TestSomething(t *testing.T) { +// var a string = "Hello" +// var b string = "Hello" // -// var a string = "Hello" -// var b string = "Hello" +// require.Equal(t, a, b, "The two words should be the same.") // -// require.Equal(t, a, b, "The two words should be the same.") +// } // -// } -// -// # Assertions +// Assertions // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f85214767..880853f5a2c 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -37,9 +37,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -53,9 +53,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -123,7 +123,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -137,7 +137,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. 
// -// assert.Emptyf(t, obj, "error message %s", "formatted") +// assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -150,7 +150,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// assert.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -168,8 +168,8 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -183,8 +183,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -195,50 +195,10 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args t.FailNow() } -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false -func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.EqualExportedValues(t, expected, actual, msgAndArgs...) { - return - } - t.FailNow() -} - -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.EqualExportedValuesf(t, expected, actual, msg, args...) { - return - } - t.FailNow() -} - // EqualValues asserts that two objects are equal or convertable to the same types // and equal. 
// -// assert.EqualValues(t, uint32(123), int32(123)) +// assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -252,7 +212,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -265,7 +225,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// assert.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -282,10 +242,10 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -323,8 +283,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -338,8 +298,8 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -376,10 +336,10 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -393,7 +353,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -404,66 +364,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t t.FailNow() } -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.EventuallyWithT(t, condition, waitFor, tick, msgAndArgs...) { - return - } - t.FailNow() -} - -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. 
-// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.EventuallyWithTf(t, condition, waitFor, tick, msg, args...) { - return - } - t.FailNow() -} - // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -476,7 +380,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -489,7 +393,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -546,7 +450,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -559,7 +463,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. 
// -// assert.Falsef(t, myBool, "error message %s", "formatted") +// assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -596,9 +500,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -611,10 +515,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -627,10 +531,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -643,9 +547,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -659,7 +563,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -675,7 +579,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -691,7 +595,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -707,7 +611,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -722,7 +626,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -737,7 +641,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -752,7 +656,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. 
// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -767,7 +671,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -782,7 +686,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -797,7 +701,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -812,7 +716,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -827,7 +731,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -842,7 +746,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. 
// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -855,7 +759,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -868,7 +772,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -925,7 +829,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. // -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -982,9 +886,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -997,9 +901,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1012,9 +916,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { 
h.Helper() @@ -1027,9 +931,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1042,9 +946,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1057,9 +961,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1072,9 +976,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1087,9 +991,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1124,7 +1028,7 @@ func IsTypef(t TestingT, expectedType 
interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1137,7 +1041,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1151,7 +1055,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3) +// assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1165,7 +1069,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1178,9 +1082,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1193,10 +1097,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1209,10 +1113,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// 
assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1225,9 +1129,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1240,8 +1144,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1254,8 +1158,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1269,7 +1173,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1283,7 +1187,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1296,7 +1200,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1309,7 +1213,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
// -// assert.Nilf(t, err, "error message %s", "formatted") +// assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1346,10 +1250,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1362,10 +1266,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1403,9 +1307,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1419,9 +1323,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1435,9 +1339,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1451,9 +1355,9 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1466,7 +1370,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// assert.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1482,7 +1386,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// assert.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1495,7 +1399,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1508,7 +1412,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1548,7 +1452,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1561,7 +1465,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1574,7 +1478,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. 
// -// assert.NotPanics(t, func(){ RemainCalm() }) +// assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1587,7 +1491,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1600,8 +1504,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1614,8 +1518,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1628,7 +1532,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// assert.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1644,7 +1548,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1661,7 +1565,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1675,7 +1579,7 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // NotSubsetf asserts that the specified list(array, slice...) 
contains not all // elements given in the specified subset(array, slice...). // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1710,7 +1614,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1725,7 +1629,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1740,7 +1644,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1754,7 +1658,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1768,7 +1672,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1685,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1794,8 +1698,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,8 +1712,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1822,8 +1726,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1836,8 +1740,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1850,7 +1754,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// assert.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1866,7 +1770,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1883,7 +1787,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1897,7 +1801,7 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1910,7 +1814,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1923,7 +1827,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1936,7 +1840,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1949,7 +1853,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1962,7 +1866,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1975,7 +1879,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). 
// -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330a4..960bf6f2cab 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -31,9 +31,9 @@ func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...inte // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -44,9 +44,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -99,7 +99,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Empty(obj) +// a.Empty(obj) func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -110,7 +110,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Emptyf(obj, "error message %s", "formatted") +// a.Emptyf(obj, "error message %s", "formatted") func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) // Equal asserts that two objects are equal. // -// a.Equal(123, 123) +// a.Equal(123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -135,8 +135,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // EqualError asserts that a function returned an error (i.e. 
not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -147,8 +147,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -156,44 +156,10 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a EqualErrorf(a.t, theError, errString, msg, args...) } -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true -// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false -func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - EqualExportedValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - EqualExportedValuesf(a.t, expected, actual, msg, args...) -} - // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValues(uint32(123), int32(123)) +// a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -204,7 +170,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") +// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -214,7 +180,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg // Equalf asserts that two objects are equal. 
// -// a.Equalf(123, 123, "error message %s", "formatted") +// a.Equalf(123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -228,10 +194,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -260,8 +226,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContains(err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -272,8 +238,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -301,10 +267,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -315,7 +281,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -323,60 +289,10 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti Eventually(a.t, condition, waitFor, tick, msgAndArgs...) } -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. 
-// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithT(func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) -} - // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -386,7 +302,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t // Exactly asserts that two objects are equal in value and type. // -// a.Exactly(int32(123), int64(123)) +// a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -396,7 +312,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. 
// -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") +// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -438,7 +354,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ // False asserts that the specified value is false. // -// a.False(myBool) +// a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -448,7 +364,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// a.Falsef(myBool, "error message %s", "formatted") +// a.Falsef(myBool, "error message %s", "formatted") func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -476,9 +392,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -488,10 +404,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -501,10 +417,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . 
// GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -514,9 +430,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") -// a.Greaterf("b", "a", "error message %s", "formatted") +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -527,7 +443,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -540,7 +456,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -553,7 +469,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -566,7 +482,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. 
// -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -578,7 +494,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin // HTTPError asserts that a specified handler returns an error status code. // -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -590,7 +506,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // HTTPErrorf asserts that a specified handler returns an error status code. // -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -602,7 +518,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -614,7 +530,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -626,7 +542,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) +// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -638,7 +554,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url // HTTPStatusCodef asserts that a specified handler returns a specified status code. 
// -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -650,7 +566,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur // HTTPSuccess asserts that a specified handler returns a success status code. // -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -662,7 +578,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st // HTTPSuccessf asserts that a specified handler returns a success status code. // -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -674,7 +590,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s // Implements asserts that an object is implemented by the specified interface. // -// a.Implements((*MyInterface)(nil), new(MyObject)) +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -684,7 +600,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -694,7 +610,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, 22/7.0, 0.01) +// a.InDelta(math.Pi, 22/7.0, 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -736,7 +652,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. 
// -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -778,9 +694,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo // IsDecreasing asserts that the collection is decreasing // -// a.IsDecreasing([]int{2, 1, 0}) -// a.IsDecreasing([]float{2, 1}) -// a.IsDecreasing([]string{"b", "a"}) +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -790,9 +706,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) // IsDecreasingf asserts that the collection is decreasing // -// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") -// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -802,9 +718,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter // IsIncreasing asserts that the collection is increasing // -// a.IsIncreasing([]int{1, 2, 3}) -// a.IsIncreasing([]float{1, 2}) -// a.IsIncreasing([]string{"a", "b"}) +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -814,9 +730,9 @@ func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) // IsIncreasingf asserts that the collection is increasing // -// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") -// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -826,9 +742,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter // IsNonDecreasing asserts that the collection is not decreasing // -// a.IsNonDecreasing([]int{1, 1, 2}) -// a.IsNonDecreasing([]float{1, 2}) -// a.IsNonDecreasing([]string{"a", "b"}) +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -838,9 +754,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface // IsNonDecreasingf asserts that the collection is not decreasing // -// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") -// 
a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -850,9 +766,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in // IsNonIncreasing asserts that the collection is not increasing // -// a.IsNonIncreasing([]int{2, 1, 1}) -// a.IsNonIncreasing([]float{2, 1}) -// a.IsNonIncreasing([]string{"b", "a"}) +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -862,9 +778,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface // IsNonIncreasingf asserts that the collection is not increasing // -// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -890,7 +806,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s // JSONEq asserts that two JSON strings are equivalent. // -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -900,7 +816,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf // JSONEqf asserts that two JSON strings are equivalent. // -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -911,7 +827,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// a.Len(mySlice, 3) +// a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -922,7 +838,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. 
// -// a.Lenf(mySlice, 3, "error message %s", "formatted") +// a.Lenf(mySlice, 3, "error message %s", "formatted") func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -932,9 +848,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in // Less asserts that the first element is less than the second // -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -944,10 +860,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac // LessOrEqual asserts that the first element is less than or equal to the second // -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -957,10 +873,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i // LessOrEqualf asserts that the first element is less than or equal to the second // -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -970,9 +886,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") -// a.Lessf("a", "b", "error message %s", "formatted") +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -982,8 +898,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i // Negative asserts that the specified element is negative // -// a.Negative(-1) -// a.Negative(-1.23) +// a.Negative(-1) +// a.Negative(-1.23) func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -993,8 +909,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// a.Negativef(-1, "error message %s", "formatted") -// a.Negativef(-1.23, "error message %s", "formatted") +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { 
h.Helper() @@ -1005,7 +921,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) +// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1016,7 +932,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1026,7 +942,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t // Nil asserts that the specified object is nil. // -// a.Nil(err) +// a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1036,7 +952,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. // -// a.Nilf(err, "error message %s", "formatted") +// a.Nilf(err, "error message %s", "formatted") func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1064,10 +980,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1077,10 +993,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1109,9 +1025,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. 
// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1122,9 +1038,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1135,9 +1051,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1148,9 +1064,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1160,7 +1076,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface // NotEqual asserts that the specified values are NOT equal. // -// a.NotEqual(obj1, obj2) +// a.NotEqual(obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). 
@@ -1173,7 +1089,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValues(obj1, obj2) +// a.NotEqualValues(obj1, obj2) func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1183,7 +1099,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1193,7 +1109,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m // NotEqualf asserts that the specified values are NOT equal. // -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1224,7 +1140,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in // NotNil asserts that the specified object is not nil. // -// a.NotNil(err) +// a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1234,7 +1150,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// a.NotNilf(err, "error message %s", "formatted") +// a.NotNilf(err, "error message %s", "formatted") func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1244,7 +1160,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanics(func(){ RemainCalm() }) +// a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1254,7 +1170,7 @@ func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{} // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1264,8 +1180,8 @@ func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...inte // NotRegexp asserts that a specified regexp does not match a string. 
// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1275,8 +1191,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. // -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1286,7 +1202,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg // NotSame asserts that two pointers do not reference the same object. // -// a.NotSame(ptr1, ptr2) +// a.NotSame(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1299,7 +1215,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg // NotSamef asserts that two pointers do not reference the same object. // -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") +// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1313,7 +1229,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1324,7 +1240,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1350,7 +1266,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. 
// -// a.Panics(func(){ GoCrazy() }) +// a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1362,7 +1278,7 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1374,7 +1290,7 @@ func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, m // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1385,7 +1301,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1396,7 +1312,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFun // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1406,7 +1322,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFu // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1416,8 +1332,8 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa // Positive asserts that the specified element is positive // -// a.Positive(1) -// a.Positive(1.23) +// a.Positive(1) +// a.Positive(1.23) func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1427,8 +1343,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// a.Positivef(1, "error message %s", "formatted") -// a.Positivef(1.23, "error message %s", "formatted") +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1438,8 +1354,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1449,8 +1365,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. // -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1460,7 +1376,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . // Same asserts that two pointers reference the same object. // -// a.Same(ptr1, ptr2) +// a.Same(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1473,7 +1389,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . // Samef asserts that two pointers reference the same object. // -// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// a.Samef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1487,7 +1403,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1498,7 +1414,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1508,7 +1424,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a // True asserts that the specified value is true. // -// a.True(myBool) +// a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1518,7 +1434,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// a.Truef(myBool, "error message %s", "formatted") +// a.Truef(myBool, "error message %s", "formatted") func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1528,7 +1444,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. // -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1538,7 +1454,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta // WithinDurationf asserts that the two times are within duration delta of each other. // -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1548,7 +1464,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta // WithinRange asserts that a time is within a time range (inclusive). // -// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1558,7 +1474,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim // WithinRangef asserts that a time is within a time range (inclusive). 
// -// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/yashtewari/glob-intersection/glob.go b/vendor/github.com/yashtewari/glob-intersection/glob.go index 54d4729be03..98dfa476846 100644 --- a/vendor/github.com/yashtewari/glob-intersection/glob.go +++ b/vendor/github.com/yashtewari/glob-intersection/glob.go @@ -119,11 +119,7 @@ func NewDot() Token { } func (d dot) Equal(other Token) bool { - if d.Type() != other.Type() { - return false - } - - return true + return d.Type() == other.Type() } func (d dot) String() string { diff --git a/vendor/github.com/yashtewari/glob-intersection/non_empty.go b/vendor/github.com/yashtewari/glob-intersection/non_empty.go index 91cbdbde5bf..30f819b1f33 100644 --- a/vendor/github.com/yashtewari/glob-intersection/non_empty.go +++ b/vendor/github.com/yashtewari/glob-intersection/non_empty.go @@ -102,12 +102,17 @@ func intersectSpecial(g1, g2 Glob) bool { // intersectPlus accepts two globs such that plussed[0].Flag() == FlagPlus. // It returns a boolean describing whether their intersection exists. -// It ensures that at least one token in other maches plussed[0], before handing flow of control to intersectSpecial. +// It ensures that at least one token in other maches plussed[0]. func intersectPlus(plussed, other Glob) bool { if !Match(plussed[0], other[0]) { return false } - return intersectStar(plussed, other[1:]) + // Either the plussed has gobbled up other[0]... + if intersectStar(plussed, other[1:]) { + return true + } + // ...or if other[0] has a flag, it may completely gobble up plussed[0]. + return other[0].Flag() != FlagNone && intersectNormal(plussed[1:], other) } // intersectStar accepts two globs such that starred[0].Flag() == FlagStar. @@ -145,10 +150,6 @@ func intersectStar(starred, other Glob) bool { } // If there was no token following starToken, and everything from other was gobbled, the Globs intersect. - if nextToken == nil { - return true - } - - // If everything from other was gobbles but there was a nextToken to match, they don't intersect. - return false + //If everything from other was gobbles but there was a nextToken to match, they don't intersect. + return nextToken == nil } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index cd057f39824..033b6e6db64 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = newRoundRobinWriteScheduler() } // These start at the RFC-specified defaults. 
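To illustrate the intersectPlus change in the glob-intersection hunk above, here is a minimal usage sketch. It assumes the package's NonEmpty(g1, g2 string) (bool, error) entry point and its regex-like dot/plus/star glob syntax; the globs and expected output are illustrative only, not taken from this patch.

// Illustrative sketch only: checks whether two globs can match at least one
// common string. NonEmpty and the glob syntax are assumed, not verified here.
package main

import (
	"fmt"

	gintersect "github.com/yashtewari/glob-intersection"
)

func main() {
	// "a.+b" requires one or more arbitrary characters between 'a' and 'b';
	// "a.*" allows any suffix after 'a'. A string such as "axb" satisfies both,
	// so the intersection should be non-empty.
	ok, err := gintersect.NonEmpty("a.+b", "a.*")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok)
}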
If there is a higher @@ -2429,7 +2429,7 @@ type requestBody struct { conn *serverConn closeOnce sync.Once // for use by Close only sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body + pipe *pipe // non-nil if we have an HTTP entity message body needsContinue bool // need to send a 100-continue } @@ -2569,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = "" } } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + _, hasContentLength := rws.snapHeader["Content-Length"] + if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] @@ -2774,7 +2775,7 @@ func (w *responseWriter) FlushError() error { err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it + // (writeChunk with zero bytes), so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. _, err = chunkWriter{rws}.Write(nil) diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ac90a2631c9..4f08ccba9ab 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1268,8 +1268,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cancelRequest := func(cs *clientStream, err error) error { cs.cc.mu.Lock() - defer cs.cc.mu.Unlock() cs.abortStreamLocked(err) + bodyClosed := cs.reqBodyClosed if cs.ID != 0 { // This request may have failed because of a problem with the connection, // or for some unrelated reason. (For example, the user might have canceled @@ -1284,6 +1284,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // will not help. cs.cc.doNotReuse = true } + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. + // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed + } return err } @@ -1899,7 +1916,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of + // followed by the query production, see Sections 3.3 and 3.4 of // [RFC3986]). f(":authority", host) m := req.Method diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index c7cd0017392..cc893adc29a 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { // writeQueue is used by implementations of WriteScheduler. 
type writeQueue struct { - s []FrameWriteRequest + s []FrameWriteRequest + prev, next *writeQueue } func (q *writeQueue) empty() bool { return len(q.s) == 0 } diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go new file mode 100644 index 00000000000..54fe86322d2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -0,0 +1,119 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type roundRobinWriteScheduler struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // streams maps stream ID to a queue. + streams map[uint32]*writeQueue + + // stream queues are stored in a circular linked list. + // head is the next stream to write, or nil if there are no streams open. + head *writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +// newRoundRobinWriteScheduler constructs a new write scheduler. +// The round robin scheduler priorizes control frames +// like SETTINGS and PING over DATA frames. +// When there are no control frames to send, it performs a round-robin +// selection from the ready streams. +func newRoundRobinWriteScheduler() WriteScheduler { + ws := &roundRobinWriteScheduler{ + streams: make(map[uint32]*writeQueue), + } + return ws +} + +func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + if ws.streams[streamID] != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = q + if ws.head == nil { + ws.head = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.head.prev + q.next = ws.head + q.prev.next = q + q.next.prev = q + } +} + +func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) { + q := ws.streams[streamID] + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.head = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.head == q { + ws.head = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {} + +func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()] + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. 
+ if !ws.control.empty() { + return ws.control.shift(), true + } + if ws.head == nil { + return FrameWriteRequest{}, false + } + q := ws.head + for { + if wr, ok := q.consume(math.MaxInt32); ok { + ws.head = q.next + return wr, true + } + q = q.next + if q == ws.head { + break + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 1473e1296d0..781770c2046 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 7ed02cd4144..b3e8783cc59 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -13,12 +13,15 @@ import ( "os" "path/filepath" "runtime" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -62,6 +65,18 @@ type CredentialsParams struct { // PKCE is used to support PKCE flow. Optional for 3LO flow. PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. + TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -137,7 +152,7 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) // and App Engine flexible use ComputeTokenSource and the metadata server. 
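The round-robin write scheduler above keeps per-stream queues in a circular doubly linked list: OpenStream inserts the new queue just before head, CloseStream unlinks it and may advance head, and Pop moves head to the next queue after a successful consume so streams take turns. Below is a standalone sketch of the same ring operations; node stands in for *writeQueue and is not part of the http2 package.

// Standalone sketch of the ring used by the round-robin write scheduler.
package main

import "fmt"

type node struct {
	id         uint32
	prev, next *node
}

// insert places q at the end of the ring, i.e. just before head (OpenStream).
func insert(head, q *node) *node {
	if head == nil {
		q.next, q.prev = q, q
		return q
	}
	q.prev = head.prev
	q.next = head
	q.prev.next = q
	q.next.prev = q
	return head
}

// remove unlinks q and returns the possibly new head (CloseStream).
func remove(head, q *node) *node {
	if q.next == q { // q was the only element in the ring
		return nil
	}
	q.prev.next = q.next
	q.next.prev = q.prev
	if head == q {
		head = q.next
	}
	return head
}

func main() {
	var head *node
	for id := uint32(1); id <= 3; id++ {
		head = insert(head, &node{id: id})
	}
	head = remove(head, head.next) // drop the middle stream
	for q, first := head, head; ; q = q.next {
		fmt.Println(q.id) // prints 1 then 3
		if q.next == first {
			break
		}
	}
}

Advancing head to q.next after each successful Pop is what gives the scheduler its fairness: no single stream with a deep queue can starve the others, which is the motivation for replacing the priority scheduler default above.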
if appengineTokenFunc != nil { - return &DefaultCredentials{ + return &Credentials{ ProjectID: appengineAppIDFunc(ctx), TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil @@ -147,15 +162,14 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() - return &DefaultCredentials{ + return &Credentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. @@ -194,7 +208,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params return nil, err } ts = newErrWrappingTokenSource(ts) - return &DefaultCredentials{ + return &Credentials{ ProjectID: f.ProjectID, TokenSource: ts, JSON: jsonData, @@ -216,7 +230,7 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index b3e7bc85ce5..ca717634a3f 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -26,7 +26,7 @@ // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. 
@@ -36,26 +36,75 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // // For OIDC and SAML providers, the library can retrieve tokens in three ways: // from a local file location (file-sourced credentials), from a server // (URL-sourced credentials), or from a local executable (executable-sourced // credentials). // For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. // For executable-sourced credentials, an application needs to be available to -// output the OIDC token and other information in a JSON format. +// output the OIDC/SAML token and other information in a JSON format. // For more information on how these work (and how to implement // executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. 
+// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. // // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 8df0c493ee4..cc1223889e2 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -26,6 +26,9 @@ var Endpoint = oauth2.Endpoint{ AuthStyle: oauth2.AuthStyleInParams, } +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://oauth2.googleapis.com/token" @@ -172,7 +175,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar cfg.Endpoint.AuthURL = Endpoint.AuthURL } if cfg.Endpoint.TokenURL == "" { - cfg.Endpoint.TokenURL = Endpoint.TokenURL + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil @@ -224,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. 
func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 3eab8df7ced..dcd252a61cc 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -67,22 +67,6 @@ type Config struct { // that include all elements in a given list, in that order. var ( - validTokenURLPatterns = []*regexp.Regexp{ - // The complicated part in the middle matches any number of characters that - // aren't period, spaces, or slashes. - regexp.MustCompile(`(?i)^[^\.\s\/\\]+\.sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts-[^\.\s\/\\]+\.p\.googleapis\.com$`), - } - validImpersonateURLPatterns = []*regexp.Regexp{ - regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials-[^\.\s\/\\]+\.p\.googleapis\.com$`), - } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) @@ -110,25 +94,13 @@ func validateWorkforceAudience(input string) bool { // TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, validTokenURLPatterns, validImpersonateURLPatterns, "https") + return c.tokenSource(ctx, "https") } // tokenSource is a private function that's directly called by some of the tests, // because the unit test URLs are mocked, and would otherwise fail the // validity check. 
-func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Regexp, impersonateURLValidPats []*regexp.Regexp, scheme string) (oauth2.TokenSource, error) { - valid := validateURL(c.TokenURL, tokenURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid TokenURL provided while constructing tokenSource") - } - - if c.ServiceAccountImpersonationURL != "" { - valid := validateURL(c.ServiceAccountImpersonationURL, impersonateURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid ServiceAccountImpersonationURL provided while constructing tokenSource") - } - } - +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { if c.WorkforcePoolUserProject != "" { valid := validateWorkforceAudience(c.Audience) if !valid { diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c833f..9085fabe34e 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. +// state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -290,6 +291,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. + rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341af..7c64006de69 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. 
-const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 8e3947c3686..e6f31d374df 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index be0423e6856..3156462715a 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -741,7 +741,8 @@ main(void) e = errors[i].num; if(i > 0 && errors[i-1].num == e) continue; - strcpy(buf, strerror(e)); + strncpy(buf, strerror(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; @@ -760,7 +761,8 @@ main(void) e = signals[i].num; if(i > 0 && signals[i-1].num == e) continue; - strcpy(buf, strsignal(e)); + strncpy(buf, strsignal(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fbaeb5fff14..6de486befe1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1699,12 +1699,23 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data) } +// elfNT_PRSTATUS is a copy of the debug/elf.NT_PRSTATUS constant so +// x/sys/unix doesn't need to depend on debug/elf and thus +// compress/zlib, debug/dwarf, and other packages. 
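The golang.org/x/oauth2 changes above add ReuseTokenSourceWithExpiry and a per-token expiryDelta so callers can treat tokens as expired earlier than the 10-second default. A minimal sketch of how a caller might use the new function follows; the clientcredentials config and URL are placeholders chosen for illustration, not taken from this patch.

// Minimal sketch: tokens from base are considered expired 30 seconds before
// their reported Expiry, so they are refreshed ahead of time.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	cfg := clientcredentials.Config{
		ClientID:     "id",     // placeholder values
		ClientSecret: "secret", // placeholder values
		TokenURL:     "https://example.com/token",
	}
	base := cfg.TokenSource(context.Background())

	// Refresh 30s ahead of the reported expiry instead of the 10s default.
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, base, 30*time.Second)

	tok, err := ts.Token()
	if err != nil {
		fmt.Println("token fetch failed:", err) // expected with the placeholder URL
		return
	}
	fmt.Println(tok.Expiry.Format(time.RFC3339))
}

The same mechanism backs CredentialsParams.EarlyTokenRefresh in the google package hunks above, which threads an early-expiry duration through computeTokenSource for GCE metadata credentials.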
+const elfNT_PRSTATUS = 1 + func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regsout)) + iov.SetLen(int(unsafe.Sizeof(*regsout))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regs)) + iov.SetLen(int(unsafe.Sizeof(*regs))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -2420,6 +2431,21 @@ func PthreadSigmask(how int, set, oldset *Sigset_t) error { return rtSigprocmask(how, set, oldset, _C__NSIG/8) } +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index f9c7a9663c6..c5f166a1152 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -151,6 +151,21 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL @@ -338,8 +353,6 @@ func Uname(uname *Utsname) error { // getgid // getitimer // getlogin -// getresgid -// getresuid // getthrid // ktrace // lfs_bmapv diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f619252691e..48984202c65 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -329,6 +329,54 @@ const ( SCM_WIFI_STATUS = 0x25 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 + SF_FP = 0x38 + SF_I0 = 0x20 + SF_I1 = 0x24 + SF_I2 = 0x28 + SF_I3 = 0x2c + SF_I4 = 0x30 + SF_I5 = 0x34 + SF_L0 = 0x0 + SF_L1 = 0x4 + SF_L2 = 0x8 + SF_L3 = 0xc + SF_L4 = 0x10 + SF_L5 = 0x14 + SF_L6 = 0x18 + SF_L7 = 0x1c + SF_PC = 0x3c + SF_RETP = 0x40 + SF_V9_FP = 0x70 + SF_V9_I0 = 0x40 + SF_V9_I1 = 0x48 + SF_V9_I2 = 0x50 + SF_V9_I3 = 0x58 + SF_V9_I4 = 0x60 + SF_V9_I5 = 0x68 + SF_V9_L0 = 0x0 + SF_V9_L1 = 0x8 + SF_V9_L2 = 0x10 + SF_V9_L3 = 0x18 + SF_V9_L4 = 0x20 + SF_V9_L5 = 0x28 + SF_V9_L6 = 0x30 + SF_V9_L7 = 0x38 + SF_V9_PC = 0x78 + SF_V9_RETP = 0x80 + SF_V9_XARG0 = 0x88 + SF_V9_XARG1 = 0x90 + SF_V9_XARG2 = 0x98 + SF_V9_XARG3 = 0xa0 + SF_V9_XARG4 = 0xa8 + SF_V9_XARG5 = 0xb0 + SF_V9_XXARG = 0xb8 + SF_XARG0 = 0x44 + SF_XARG1 = 0x48 + SF_XARG2 = 0x4c + SF_XARG3 = 0x50 + SF_XARG4 = 0x54 + SF_XARG5 = 0x58 + SF_XXARG = 0x5c SIOCATMARK = 0x8905 SIOCGPGRP = 0x8904 SIOCGSTAMPNS_NEW = 0x40108907 diff 
--git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index da63d9d7822..722c29a0087 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2172,3 +2172,17 @@ func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + RawSyscallNoError(SYS_GETRESUID, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 6699a783e1f..9ab9abf7215 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 04f0de34b2e..3dcacd30d7e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 1e775fe0571..915761eab77 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ 
-519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 27b6f4df74f..2763620b01a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 7f6427899a5..8e87fdf153f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index b797045fd2d..c922314048f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 756ef7b1736..12a7a2160e0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a871266221e..a6bc32c9220 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT 
libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 7bc2e24eb95..b19e8aa031d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 05d4bffd791..b4e7bceabf3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 739be6217a3..fb99594c937 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var 
libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 74a25f8d643..ca3f766009c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -189,6 +189,18 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresuid(SB) + RET +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresgid(SB) + RET +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ioctl(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 7d95a197803..32cbbbc52b5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 990be245740..477a7d5b21e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, 
$libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index ca84727cfe8..00c3b8c20f3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2555,6 +2555,11 @@ const ( BPF_REG_8 = 0x8 BPF_REG_9 = 0x9 BPF_REG_10 = 0xa + BPF_CGROUP_ITER_ORDER_UNSPEC = 0x0 + BPF_CGROUP_ITER_SELF_ONLY = 0x1 + BPF_CGROUP_ITER_DESCENDANTS_PRE = 0x2 + BPF_CGROUP_ITER_DESCENDANTS_POST = 0x3 + BPF_CGROUP_ITER_ANCESTORS_UP = 0x4 BPF_MAP_CREATE = 0x0 BPF_MAP_LOOKUP_ELEM = 0x1 BPF_MAP_UPDATE_ELEM = 0x2 @@ -2566,6 +2571,7 @@ const ( BPF_PROG_ATTACH = 0x8 BPF_PROG_DETACH = 0x9 BPF_PROG_TEST_RUN = 0xa + BPF_PROG_RUN = 0xa BPF_PROG_GET_NEXT_ID = 0xb BPF_MAP_GET_NEXT_ID = 0xc BPF_PROG_GET_FD_BY_ID = 0xd @@ -2610,6 +2616,7 @@ const ( BPF_MAP_TYPE_CPUMAP = 0x10 BPF_MAP_TYPE_XSKMAP = 0x11 BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 0x13 BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 @@ -2620,6 +2627,10 @@ const ( BPF_MAP_TYPE_STRUCT_OPS = 0x1a BPF_MAP_TYPE_RINGBUF = 0x1b BPF_MAP_TYPE_INODE_STORAGE = 0x1c + BPF_MAP_TYPE_TASK_STORAGE = 0x1d + BPF_MAP_TYPE_BLOOM_FILTER = 0x1e + BPF_MAP_TYPE_USER_RINGBUF = 0x1f + BPF_MAP_TYPE_CGRP_STORAGE = 0x20 BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -2651,6 +2662,7 @@ const ( BPF_PROG_TYPE_EXT = 0x1c BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_PROG_TYPE_SYSCALL = 0x1f BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2689,6 +2701,12 @@ const ( BPF_XDP_CPUMAP = 0x23 BPF_SK_LOOKUP = 0x24 BPF_XDP = 0x25 + BPF_SK_SKB_VERDICT = 0x26 + BPF_SK_REUSEPORT_SELECT = 0x27 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 0x28 + BPF_PERF_EVENT = 0x29 + BPF_TRACE_KPROBE_MULTI = 0x2a + BPF_LSM_CGROUP = 0x2b BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2696,6 +2714,9 @@ const ( BPF_LINK_TYPE_ITER = 0x4 BPF_LINK_TYPE_NETNS = 0x5 BPF_LINK_TYPE_XDP = 0x6 + BPF_LINK_TYPE_PERF_EVENT = 0x7 + BPF_LINK_TYPE_KPROBE_MULTI = 0x8 + BPF_LINK_TYPE_STRUCT_OPS = 0x9 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2733,6 +2754,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 @@ -2747,6 +2769,7 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2771,10 +2794,16 @@ const ( BPF_LWT_ENCAP_SEG6 = 0x0 BPF_LWT_ENCAP_SEG6_INLINE = 0x1 BPF_LWT_ENCAP_IP = 0x2 + BPF_F_BPRM_SECUREEXEC = 0x1 + BPF_F_BROADCAST = 0x8 + BPF_F_EXCLUDE_INGRESS = 0x10 + 
BPF_SKB_TSTAMP_UNSPEC = 0x0 + BPF_SKB_TSTAMP_DELIVERY_MONO = 0x1 BPF_OK = 0x0 BPF_DROP = 0x2 BPF_REDIRECT = 0x7 BPF_LWT_REROUTE = 0x80 + BPF_FLOW_DISSECTOR_CONTINUE = 0x81 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 @@ -2838,6 +2867,10 @@ const ( BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_MTU_CHK_SEGS = 0x1 + BPF_MTU_CHK_RET_SUCCESS = 0x0 + BPF_MTU_CHK_RET_FRAG_NEEDED = 0x1 + BPF_MTU_CHK_RET_SEGS_TOOBIG = 0x2 BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 BPF_FD_TYPE_TRACEPOINT = 0x1 BPF_FD_TYPE_KPROBE = 0x2 @@ -2847,6 +2880,19 @@ const ( BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_CORE_FIELD_BYTE_OFFSET = 0x0 + BPF_CORE_FIELD_BYTE_SIZE = 0x1 + BPF_CORE_FIELD_EXISTS = 0x2 + BPF_CORE_FIELD_SIGNED = 0x3 + BPF_CORE_FIELD_LSHIFT_U64 = 0x4 + BPF_CORE_FIELD_RSHIFT_U64 = 0x5 + BPF_CORE_TYPE_ID_LOCAL = 0x6 + BPF_CORE_TYPE_ID_TARGET = 0x7 + BPF_CORE_TYPE_EXISTS = 0x8 + BPF_CORE_TYPE_SIZE = 0x9 + BPF_CORE_ENUMVAL_EXISTS = 0xa + BPF_CORE_ENUMVAL_VALUE = 0xb + BPF_CORE_TYPE_MATCHES = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 3723b2c224c..9645900754f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -405,7 +405,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW // Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses //sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules //sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation @@ -1354,6 +1354,17 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } +func EnumProcesses(processIds []uint32, bytesReturned *uint32) error { + // EnumProcesses syscall expects the size parameter to be in bytes, but the code generated with mksyscall uses + // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy. 
+ var p *uint32 + if len(processIds) > 0 { + p = &processIds[0] + } + size := uint32(len(processIds) * 4) + return enumProcesses(p, size, bytesReturned) +} + func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a81ea2c7001..566dd3e315f 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -3516,12 +3516,8 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) +func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/default_cert.go rename to vendor/google.golang.org/api/internal/cert/default_cert.go diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go similarity index 93% rename from vendor/google.golang.org/api/transport/cert/enterprise_cert.go rename to vendor/google.golang.org/api/internal/cert/enterprise_cert.go index eaa52e07c08..1061b5f05f3 100644 --- a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go +++ b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -15,7 +15,6 @@ package cert import ( "crypto/tls" "errors" - "os" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -36,8 +35,7 @@ type ecpSource struct { func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Enterprise Certificate Proxy is not supported. 
+ if errors.Is(err, client.ErrCredUnavailable) { return nil, errSourceUnavailable } return nil, err diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/secureconnect_cert.go rename to vendor/google.golang.org/api/internal/cert/secureconnect_cert.go diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 32d52413b30..63c66092203 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -6,10 +6,15 @@ package internal import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" "google.golang.org/api/internal/impersonate" @@ -17,6 +22,8 @@ import ( "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -80,8 +87,25 @@ const ( // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } + params.TokenURL = oauth2Endpoint + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) if err != nil { return nil, err } @@ -131,14 +155,22 @@ func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource } } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. +func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject @@ -157,3 +189,35 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * ProjectID: creds.ProjectID, }, nil } + +// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. 
+func oauth2DialSettings(ds *DialSettings) *DialSettings { + var ods DialSettings + ods.DefaultEndpoint = google.Endpoint.TokenURL + ods.DefaultMTLSEndpoint = google.MTLSTokenURL + ods.ClientCertSource = ds.ClientCertSource + return &ods +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/internal/dca.go similarity index 92% rename from vendor/google.golang.org/api/transport/internal/dca/dca.go rename to vendor/google.golang.org/api/internal/dca.go index 78004f0475f..204a3fd2f3f 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/internal/dca.go @@ -23,15 +23,16 @@ // // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. -package dca + +// Package internal supports the options and transport packages. +package internal import ( "net/url" "os" "strings" - "google.golang.org/api/internal" - "google.golang.org/api/transport/cert" + "google.golang.org/api/internal/cert" ) const ( @@ -43,7 +44,7 @@ const ( // GetClientCertificateSourceAndEndpoint is a convenience function that invokes // getClientCertificateSource and getEndpoint sequentially and returns the client // cert source and endpoint as a tuple. -func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { +func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { clientCertSource, err := getClientCertificateSource(settings) if err != nil { return nil, "", err @@ -65,7 +66,7 @@ func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cer // Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE // must be set to "true" to allow certificate to be used (including user provided // certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { if !isClientCertificateEnabled() { return nil, nil } else if settings.ClientCertSource != nil { @@ -94,7 +95,7 @@ func isClientCertificateEnabled() bool { // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. 
For example, WithEndpoint("myhost:8000") and // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 2c1b8c1abe6..7a4f6d8982e 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.108.0" +const Version = "0.114.0" diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index efcc8e6c641..20c94fa640b 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -21,7 +21,6 @@ import ( "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" - "google.golang.org/api/transport/internal/dca" "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" @@ -123,7 +122,7 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if o.GRPCConn != nil { return o.GRPCConn, nil } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o) + clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(o) if err != nil { return nil, err } @@ -155,14 +154,10 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return nil, err } - if o.QuotaProject == "" { - o.QuotaProject = internal.QuotaProjectFromCreds(creds) - } - grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, + quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), requestReason: o.RequestReason, }), ) diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_go116.go deleted file mode 100644 index 305a6929c88..00000000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.16 -// +build go1.16 - -package http - -import ( - "net/http" - "time" - - "golang.org/x/net/http2" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. This allows broken idle connections to be pruned more quickly, -// preventing the client from attempting to re-use connections that will no -// longer work. 
-func configureHTTP2(trans *http.Transport) { - http2Trans, err := http2.ConfigureTransports(trans) - if err == nil { - http2Trans.ReadIdleTimeout = time.Second * 31 - } -} diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go deleted file mode 100644 index d2742d283a6..00000000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.16 -// +build !go1.16 - -package http - -import ( - "net/http" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. The interface to do this is only available in Go 1.16 and up, so -// this performs a no-op. -func configureHTTP2(trans *http.Transport) {} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index cab709f0c05..403509d08f6 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,13 +16,13 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" - "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -65,7 +65,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna paramTransport := ¶meterTransport{ base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport @@ -74,6 +73,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna case settings.NoAuth: // Do nothing. case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -83,10 +83,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if paramTransport.quotaProject == "" { - paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) - } - + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource @@ -175,13 +172,22 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt } } - // If possible, configure http2 transport in order to use ReadIdleTimeout - // setting. This can only be done in Go 1.16 and up. configureHTTP2(trans) return trans } +// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the +// transport. 
This allows broken idle connections to be pruned more quickly, +// preventing the client from attempting to re-use connections that will no +// longer work. +func configureHTTP2(trans *http.Transport) { + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } +} + // fallbackBaseTransport is used in google.api.ClientLibraryDestination - 16, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1453,20 +1562,22 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 29, // [29:29] is the sub-list for method output_type - 29, // [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 26, // [26:29] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list 
for extension type_name + 28, // [28:31] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1619,7 +1730,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1638,7 +1749,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 14, + NumMessages: 16, NumExtensions: 3, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 164e0df0bf5..dbe2e2d0c65 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.2 +// protoc-gen-go v1.26.0 +// protoc v3.21.9 // source: google/api/field_behavior.proto package annotations @@ -149,13 +149,13 @@ var ( // // Examples: // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; // // repeated google.api.FieldBehavior field_behavior = 1052; E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 6f11b7c500f..8a0e1c345b9 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/http.proto package annotations @@ -270,15 +270,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: // - Fields referred by the path template. 
They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP // request body. // - All other fields are passed via the URL query parameters, and the // parameter name is the field path in the request message. A repeated // field can be represented as multiple query parameters under the same // name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields // are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all // fields are passed via URL path and URL query parameters. // // ### Path template syntax @@ -377,13 +380,15 @@ type HttpRule struct { // Selects a method to which this rule applies. // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. // // Types that are assignable to Pattern: + // // *HttpRule_Get // *HttpRule_Put // *HttpRule_Post diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 13ea54b2940..bbcc12d29c8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/resource.proto package annotations @@ -218,14 +218,14 @@ type ResourceDescriptor struct { // The path pattern must follow the syntax, which aligns with HTTP binding // syntax: // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; // // Examples: // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" // // The components in braces correspond to the IDs for each resource in the // hierarchy. It is expected that, if multiple patterns are provided, @@ -239,17 +239,17 @@ type ResourceDescriptor struct { // // Example: // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. 
- // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission @@ -362,22 +362,22 @@ type ResourceReference struct { // // Example: // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } // // Occasionally, a field may reference an arbitrary resource. In this case, // APIs use the special value * in their resource reference. // // Example: // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The resource type of a child collection that the annotated field // references. This is useful for annotating the `parent` field that @@ -385,11 +385,11 @@ type ResourceReference struct { // // Example: // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 6707a7b1c1d..9a9ae04c29a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/routing.proto package annotations @@ -468,46 +468,46 @@ type RoutingParameter struct { // // Example: // - // -- This is a field in the request message - // | that the header value will be extracted from. 
- // | - // | -- This is the key name in the - // | | routing header. - // V | - // field: "table_name" v - // path_template: "projects/*/{table_location=instances/*}/tables/*" - // ^ ^ - // | | - // In the {} brackets is the pattern that -- | - // specifies what to extract from the | - // field as a value to be sent. | - // | - // The string in the field must match the whole pattern -- - // before brackets, inside brackets, after brackets. + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. // // When looking at this specific example, we can see that: - // - A key-value pair with the key `table_location` - // and the value matching `instances/*` should be added - // to the x-goog-request-params routing header. - // - The value is extracted from the request message's `table_name` field - // if it matches the full pattern specified: - // `projects/*/instances/*/tables/*`. + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. // // **NB:** If the `path_template` field is not provided, the key name is // equal to the field name, and the whole field should be sent as a value. // This makes the pattern for the field and the value functionally equivalent // to `**`, and the configuration // - // { - // field: "table_name" - // } + // { + // field: "table_name" + // } // // is a functionally equivalent shorthand to: // - // { - // field: "table_name" - // path_template: "{table_name=**}" - // } + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } // // See Example 1 for more details. PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go index d3b90903383..aa640dc31cd 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/distribution.proto package distribution @@ -66,7 +66,7 @@ type Distribution struct { // The sum of squared deviations from the mean of the values in the // population. For values x_i this is: // - // Sum[i=1..n]((x_i - mean)^2) + // Sum[i=1..n]((x_i - mean)^2) // // Knuth, "The Art of Computer Programming", Vol. 
2, page 232, 3rd edition // describes Welford's method for accumulating this sum in one pass. @@ -261,6 +261,7 @@ type Distribution_BucketOptions struct { // Exactly one of these three fields must be set. // // Types that are assignable to Options: + // // *Distribution_BucketOptions_LinearBuckets // *Distribution_BucketOptions_ExponentialBuckets // *Distribution_BucketOptions_ExplicitBuckets @@ -369,12 +370,12 @@ type Distribution_Exemplar struct { Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Contextual information about the example value. Examples are: // - // Trace: type.googleapis.com/google.monitoring.v3.SpanContext + // Trace: type.googleapis.com/google.monitoring.v3.SpanContext // - // Literal string: type.googleapis.com/google.protobuf.StringValue + // Literal string: type.googleapis.com/google.protobuf.StringValue // - // Labels dropped during aggregation: - // type.googleapis.com/google.monitoring.v3.DroppedLabels + // Labels dropped during aggregation: + // type.googleapis.com/google.monitoring.v3.DroppedLabels // // There may be only a single attachment of any given message type in a // single exemplar, and this is enforced by the system. @@ -442,6 +443,7 @@ func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { // following boundaries: // // Upper bound (0 <= i < N-1): offset + (width * i). +// // Lower bound (1 <= i < N): offset + (width * (i - 1)). type Distribution_BucketOptions_Linear struct { state protoimpl.MessageState @@ -517,6 +519,7 @@ func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { // following boundaries: // // Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// // Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). type Distribution_BucketOptions_Exponential struct { state protoimpl.MessageState diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index af72196c809..3543268f84e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go index 6d693dd9865..75397e1b14f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/label.proto package label diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 71075313773..454948669dc 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc v3.21.9 // source: google/api/launch_stage.proto package api diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index 4f02c47cadb..2af7d129ba6 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc v3.21.9 // source: google/api/metric.proto package metric @@ -320,9 +320,10 @@ type MetricDescriptor struct { LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` // Read-only. If present, then a [time // series][google.monitoring.v3.TimeSeries], which is identified partially by - // a metric type and a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that is associated - // with this metric type can only be associated with one of the monitored - // resource types listed here. + // a metric type and a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that + // is associated with this metric type can only be associated with one of the + // monitored resource types listed here. MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` } @@ -442,8 +443,9 @@ type Metric struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. - // For example, `custom.googleapis.com/invoice/paid/amount`. + // An existing metric type, see + // [google.api.MetricDescriptor][google.api.MetricDescriptor]. For example, + // `custom.googleapis.com/invoice/paid/amount`. Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` // The set of label values that uniquely identify this metric. All // labels listed in the `MetricDescriptor` must be assigned values. @@ -502,7 +504,9 @@ type MetricDescriptor_MetricDescriptorMetadata struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Deprecated. Must use the [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] instead. + // Deprecated. 
Must use the + // [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] + // instead. // // Deprecated: Do not use. LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go index 1eb0df9ffd7..de791ea6143 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc v3.21.9 // source: google/api/monitored_resource.proto package monitoredres @@ -38,9 +38,10 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a -// type name and a set of labels. For example, the monitored resource -// descriptor for Google Compute Engine VM instances has a type of +// An object that describes the schema of a +// [MonitoredResource][google.api.MonitoredResource] object using a type name +// and a set of labels. For example, the monitored resource descriptor for +// Google Compute Engine VM instances has a type of // `"gce_instance"` and specifies the use of the labels `"instance_id"` and // `"zone"` to identify particular VM instances. // @@ -161,11 +162,13 @@ func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { // An object representing a resource that can be used for monitoring, logging, // billing, or other purposes. Examples include virtual machine instances, // databases, and storage devices such as disks. The `type` field identifies a -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's -// schema. Information in the `labels` field identifies the actual resource and -// its attributes according to the schema. For example, a particular Compute -// Engine VM instance could be represented by the following object, because the -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object +// that describes the resource's schema. Information in the `labels` field +// identifies the actual resource and its attributes according to the schema. +// For example, a particular Compute Engine VM instance could be represented by +// the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for +// `"gce_instance"` has labels // `"project_id"`, `"instance_id"` and `"zone"`: // // { "type": "gce_instance", @@ -178,10 +181,12 @@ type MonitoredResource struct { unknownFields protoimpl.UnknownFields // Required. The monitored resource type. This field must match - // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For - // example, the type of a Compute Engine VM instance is `gce_instance`. 
- // Some descriptors include the service name in the type; for example, - // the type of a Datastream stream is `datastream.googleapis.com/Stream`. + // the `type` field of a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + // object. For example, the type of a Compute Engine VM instance is + // `gce_instance`. Some descriptors include the service name in the type; for + // example, the type of a Datastream stream is + // `datastream.googleapis.com/Stream`. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Required. Values for all of the labels listed in the associated monitored // resource descriptor. For example, Compute Engine VM instances use the @@ -235,12 +240,12 @@ func (x *MonitoredResource) GetLabels() map[string]string { return nil } -// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object. -// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to -// uniquely identify a monitored resource instance. There is some other useful -// auxiliary metadata. Monitoring and Logging use an ingestion -// pipeline to extract metadata for cloud resources of all types, and store -// the metadata in this message. +// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] +// object. [MonitoredResource][google.api.MonitoredResource] objects contain the +// minimum set of information to uniquely identify a monitored resource +// instance. There is some other useful auxiliary metadata. Monitoring and +// Logging use an ingestion pipeline to extract metadata for cloud resources of +// all types, and store the metadata in this message. type MonitoredResourceMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 8e001134da2..608aa6e1ac5 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,10 +20,6 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). - - If you are searching for features to work on, issues labeled [Status: Help Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) is a great place to start. These issues are well-documented and usually can be diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 02f5dc53189..3efca459149 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,6 +25,11 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an @@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } + +// String prints the attribute map. 
If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + var key, val string + if str, ok := k.(interface{ String() string }); ok { + key = str.String() + } + if str, ok := v.(interface{ String() string }); ok { + val = str.String() + } + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + first = false + } + sb.WriteString("}") + return sb.String() +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 09d61dd1b55..8f00523c0e2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -286,7 +286,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 6620ed11c78..f070878bd99 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 0359956d36f..04b9ad41169 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,14 +25,20 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -49,192 +55,101 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). 
- resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update. - -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. 
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { - return nil - } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. + if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) } - addrs = append(addrs, addr) + ccs.ResolverState.Addresses = addrs } - ccs.ResolverState.Addresses = addrs + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. 
So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. 
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. -func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. 
+ done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return nil, err } acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is @@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + ac *addrConn // read-only + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} - -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") - // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 8cd89dab904..ec2c2fa14dd 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d385f..e6a1dc5d75e 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index b9cc055075e..95a7459b02f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -69,6 +68,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,17 +136,29 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. 
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) disableGlobalOpts := false for _, opt := range opts { @@ -173,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. + cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -244,35 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: @@ -288,57 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. 
+ if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. 
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -484,7 +620,9 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -505,11 +643,31 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. 
// @@ -549,7 +707,10 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -718,6 +879,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -811,9 +973,6 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -832,58 +991,62 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. 
- var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -1036,39 +1199,40 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1098,7 +1262,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1112,6 +1277,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. 
+ close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s if lastErr == nil { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) @@ -1137,7 +1305,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1165,15 +1334,14 @@ func (ac *addrConn) resetTransport() { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) // After exhausting all addresses, the addrConn enters // TRANSIENT_FAILURE. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if acCtx.Err() != nil { return } + ac.mu.Lock() ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1188,13 +1356,13 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1209,14 +1377,13 @@ func (ac *addrConn) resetTransport() { // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1230,7 +1397,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1247,19 +1414,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. 
tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1278,7 +1446,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1295,7 +1463,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1307,6 +1475,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1414,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1565,7 +1759,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
+func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1577,7 +1778,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1592,15 +1794,16 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a @@ -1623,7 +1826,15 @@ func parseTarget(target string) (resolver.Target, error) { // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1640,25 +1851,58 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. 
For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - return endpoint, nil + cc.authority = endpoint + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index c8a30753142..150ae557676 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -387,5 +387,7 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b // Close terminates the Handshaker. It should be called when the caller obtains // the secure connection. func (h *altsHandshaker) Close() { - h.stream.CloseSend() + if h.stream != nil { + h.stream.CloseSend() + } } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 2de2c4affda..e1cdafb980c 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -58,3 +58,21 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } + +// CloseForTesting closes all open connections to the handshaker service. +// +// For testing purposes only. +func CloseForTesting() error { + for _, hsConn := range hsConnMap { + if hsConn == nil { + continue + } + if err := hsConn.Close(); err != nil { + return err + } + } + + // Reset the connection map. + hsConnMap = make(map[string]*grpc.ClientConn) + return nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 16e814b9b91..83e3bae37b1 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 258a130a9d0..0b0093328bf 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index cc9a2705996..c2e564c7ded 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e9d6852fd2c..15a3d5102a9 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -77,6 +77,7 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration } // DialOption configures how we set up the connection. @@ -295,6 +296,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -306,6 +310,9 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -448,6 +455,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -646,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. 
If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 0b1abc6467c..142d35f753e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go new file mode 100644 index 00000000000..dc3dc72f6b0 --- /dev/null +++ b/vendor/google.golang.org/grpc/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. +type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. 
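The WithIdleTimeout option added above is the public entry point for the idleness manager introduced in this new idle.go file. A minimal usage sketch; the target address and the use of insecure credentials are assumptions for illustration only:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// After 30 minutes with no RPC activity the channel enters idle mode
		// and its name resolver and load balancer are shut down; the next RPC
		// (or an explicit conn.Connect()) brings it back out of idle.
		grpc.WithIdleTimeout(30*time.Minute),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}
```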
+ + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. 
+ i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := i.enforcer.enterIdleMode(); err != nil { + logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + i.actuallyIdle = true + return true +} + +// onCallBegin is invoked at the start of every RPC. +func (i *idlenessManagerImpl) onCallBegin() error { + if i.isClosed() { + return nil + } + + if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := i.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&i.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (i *idlenessManagerImpl) exitIdleMode() error { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if !i.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and onCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in onCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. 
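The counter manipulation above relies on parking activeCallsCount at -math.MaxInt32 as an "idle or going idle" sentinel that onCallBegin can detect and undo. A self-contained sketch of that idiom, using hypothetical names that are not part of the patch:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type gate struct{ active int32 }

// begin registers a caller; it returns false if the gate is parked.
func (g *gate) begin() bool {
	if atomic.AddInt32(&g.active, 1) > 0 {
		return true // gate is open and this caller is counted
	}
	// Gate is parked at the negative sentinel; undo the increment and let the
	// caller take a slow path (in the real code: try to exit idle mode first).
	atomic.AddInt32(&g.active, -1)
	return false
}

// end unregisters a caller.
func (g *gate) end() { atomic.AddInt32(&g.active, -1) }

// tryPark parks the gate, but only if no callers are currently active.
func (g *gate) tryPark() bool {
	return atomic.CompareAndSwapInt32(&g.active, 0, -math.MaxInt32)
}

// unpark reopens the gate, preserving any increments that raced in meanwhile.
func (g *gate) unpark() { atomic.AddInt32(&g.active, math.MaxInt32) }

func main() {
	var g gate
	fmt.Println(g.begin())   // true: counted as active
	g.end()                  // back to zero
	fmt.Println(g.tryPark()) // true: nobody active, gate parked
	fmt.Println(g.begin())   // false: gate is parked
	g.unpark()
	fmt.Println(g.begin())   // true again after unparking
}
```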
+ return nil + } + + if err := i.enforcer.exitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + return nil +} + +// onCallEnd is invoked at the end of every RPC. +func (i *idlenessManagerImpl) onCallEnd() { + if i.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index af03a40d990..755fdebc1b1 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog") // Logger specifies MethodLoggers for method names with a Log call that // takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 56fcf008d3d..6c3f632215f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { Log(context.Context, LogEntryConfig) } @@ -65,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c1200d..81c2f5fd761 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 5ba9d94d49c..80fd5c7d2a4 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,6 +36,10 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index 821dd0a7c19..dd314cfb18f 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
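The Get/Load/Close contract added to the internal Unbounded buffer above implies a specific consumer loop: receive a value, call Load to stage the next backlogged item, and exit when Close closes the channel. A hedged sketch of that loop, written as if it lived inside the grpc module (the buffer package is internal and cannot be imported from outside):

```go
package consumer // hypothetical in-module package, for illustration only

import "google.golang.org/grpc/internal/buffer"

func consume(b *buffer.Unbounded, handle func(interface{})) {
	for v := range b.Get() {
		// Per the contract above: after receiving a value, call Load() so the
		// buffer can move the next backlogged item onto the read channel.
		b.Load()
		handle(v)
	}
	// The range loop ends once Close() is called, because Close() closes the
	// channel returned by Get().
}
```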
ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 04136882c7b..02b4b6a1c10 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -61,11 +61,10 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -79,14 +78,18 @@ var ( // XDSFederation indicates whether federation support is enabled, which can // be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 517ea70642a..d08e3e90766 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,3 +72,17 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. 
+var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 00000000000..37b8d4117e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + Done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } + t.callbacks.Put(f) + return true +} + +func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. 
+ t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 836b6a3b3e7..42ff39c8444 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -60,6 +60,9 @@ var ( GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An @@ -69,20 +72,35 @@ var ( // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalDialOptions interface{} // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. @@ -93,9 +111,15 @@ var ( // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
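For the CallbackSerializer added in the grpcsync hunk above, a small usage sketch; this is hypothetical and in-module only, since internal/grpcsync cannot be imported from outside the grpc module:

```go
package main // illustrative only; assumes it compiles inside the grpc module

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in FIFO order, on the serializer's goroutine.
	cs.Schedule(func(ctx context.Context) { fmt.Println("first") })
	cs.Schedule(func(ctx context.Context) { fmt.Println("second") })

	// Cancelling the context shuts the serializer down; callbacks that were
	// already scheduled still run, and Done is closed once everything drains.
	cancel()
	<-cs.Done
}
```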
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000000..11d82afcc7e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index c343c23a530..be5a9c81eb9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -30,6 +30,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -488,12 +489,13 @@ type loopyWriter struct { bdpEst *bdpEstimator draining bool conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -507,6 +509,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, conn: conn, + logger: logger, } return l } @@ -536,8 +539,8 @@ const minBatchSize = 1000 // left open to allow the I/O error to be encountered by the reader instead. 
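The serviceconfig.Duration type added above round-trips protobuf-JSON duration strings. A hedged in-module sketch of what its marshalling produces and accepts (the serviceconfig package is internal, so this is illustrative only):

```go
package main // illustrative only; assumes it compiles inside the grpc module

import (
	"encoding/json"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	// Marshal: durations render as seconds strings with 0, 3, 6, or 9
	// fractional digits, e.g. 1500ms -> "1.500s".
	b, _ := json.Marshal(serviceconfig.Duration(1500 * time.Millisecond))
	fmt.Println(string(b)) // "1.500s"

	// Unmarshal: the trailing "s" unit is required; plain numbers are rejected.
	var d serviceconfig.Duration
	_ = json.Unmarshal([]byte(`"0.250s"`), &d)
	fmt.Println(time.Duration(d)) // 250ms
}
```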
func (l *loopyWriter) run() (err error) { defer func() { - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exiting with error: %v", err) + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } if !isIOError(err) { l.framer.writer.Flush() @@ -636,8 +639,8 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -692,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index e6626bf96e7..98f80e3fa00 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -83,6 +84,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) @@ -150,13 +152,14 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } func (ht *serverHandlerTransport) Close(err error) { ht.closeOnce.Do(func() { - if logger.V(logLevel) { - logger.Infof("Closing serverHandlerTransport: %v", err) + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) } close(ht.closedCh) }) @@ -450,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 9826feb8c69..326bf084800 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" @@ -145,6 +146,7 @@ type http2Client struct { bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, 
fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -244,7 +246,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. if logger.V(logLevel) { - logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + logger.Infof("Aborting due to connect deadline expiring: %v", err) } conn.Close() } @@ -346,6 +348,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts bufferPool: newBufferPool(), onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -444,7 +447,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.run() close(t.writerDone) }() @@ -782,7 +785,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.activeStreams == nil { // Can be niled from Close(). + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } @@ -859,8 +862,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } } if transportDrainRequired { - if logger.V(logLevel) { - logger.Infof("transport: t.nextID > MaxStreamID. Draining") + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } t.GracefulClose() } @@ -952,8 +955,8 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. @@ -1009,8 +1012,8 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: GracefulClose called") + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") } t.onClose(GoAwayInvalid) t.state = draining @@ -1174,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1334,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 99ae1a73746..79e86ba0883 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,7 +35,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" @@ -129,6 +131,8 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -267,6 +271,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) // Add peer information to the http2server context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -331,7 +336,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() close(t.writerDone) @@ -425,8 +430,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // "Transports must consider requests containing the Connection header // as malformed." - A41 case "connection": - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") } protocolError = true default: @@ -436,7 +441,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -450,8 +455,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. 
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if logger.V(logLevel) { - logger.Errorf("transport: %v", errMsg) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: http.StatusBadRequest, @@ -545,9 +550,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) - if logger.V(logLevel) { - logger.Infof("transport: %v", errMsg) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -563,8 +568,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -638,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -682,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -942,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -1056,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
- logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -1161,18 +1166,18 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } t.controlBuf.put(closeConnection{}) case <-t.done: @@ -1223,8 +1228,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1232,8 +1237,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. @@ -1313,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1362,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 8fcae4f4d07..19cbb18f5ab 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -38,7 +38,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -86,7 +85,6 @@ var ( // 504 Gateway timeout - UNAVAILABLE. 
http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 00000000000..42ed2b07af6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1b7d7fabc51..aa1c896595d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -726,7 +726,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index c525dc070fc..02f97595124 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -36,6 +36,7 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool + idle bool blockingCh chan struct{} picker balancer.Picker } @@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is sage + // to ignore the update from the LB policy. 
pw.mu.Unlock() return } @@ -63,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { // - wraps the done function in the passed in result to increment the calls // failed or calls succeeded channelz counter before invoking the actual // done function. -func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() done := result.Done result.Done = func(b balancer.DoneInfo) { @@ -152,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acw, &pickResult) + doneChannelzWrapper(acbw, &pickResult) return t, pickResult, nil } return t, pickResult, nil @@ -187,6 +190,25 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. type dropError struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fc91b4d266d..abe266b021d 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,11 +19,15 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -43,10 +47,28 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. 
+ ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { @@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { @@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return balancer.ErrBadResolverState } + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { if logger.V(2) { logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) @@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+ b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6215e5ef2b0..353c10b69a5 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -124,7 +124,7 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. BalancerAttributes *attributes.Attributes @@ -151,7 +151,17 @@ func (a Address) Equal(o Address) bool { // String returns JSON formatted string representation of the address. func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 05a9d4e0bac..b408b3688f2 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,192 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. 
+ serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. 
+ if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. return nil } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 087b9ad7c1f..81969e7c15a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -43,7 +43,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" @@ -146,7 +145,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan *serverWorkerData } type serverOptions struct { @@ -561,40 +560,38 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. - threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + data, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + s.handleSingleStream(data) } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +} + +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. 
func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan *serverWorkerData) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not @@ -898,7 +895,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -946,26 +943,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) if s.opts.numServerWorkers > 0 { data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- data: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1054,7 +1046,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1864,7 +1856,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index f22acace425..0df11fc0988 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 623be39f26b..53910fb7c90 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -77,7 +77,9 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// *Status`, or if err wraps a type satisfying this, the appropriate Status is +// returned. For wrapped errors, the message returned contains the entire +// err.Error() text and not just the wrapped status. // // - If err is nil, a Status is returned with codes.OK and no message. 
// @@ -88,10 +90,15 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -103,19 +110,16 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } // FromContextError converts a context error or wrapped context error into a diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d1226a4120f..10092685b22 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -123,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -152,6 +155,11 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -469,7 +477,7 @@ func (a *csAttempt) newStream() error { // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metatada != nil { + if a.pickResult.Metadata != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -479,7 +487,7 @@ func (a *csAttempt) newStream() error { // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. 
md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metatada) + md = metadata.Join(md, a.pickResult.Metadata) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } @@ -1265,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3c6e3c9118d..0f1f8b9b33d 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.54.0" +const Version = "1.56.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index 8e4262b41e0..ce2dff35d11 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,15 @@ -# cloud.google.com/go/compute v1.15.1 +# cloud.google.com/go/compute v1.19.1 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/monitoring v1.8.0 +# cloud.google.com/go/monitoring v1.13.0 ## explicit; go 1.19 cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -# cloud.google.com/go/trace v1.8.0 +# cloud.google.com/go/trace v1.9.0 ## explicit; go 1.19 cloud.google.com/go/trace/apiv2 cloud.google.com/go/trace/apiv2/tracepb @@ -223,6 +223,9 @@ github.com/fsnotify/fsnotify # github.com/ghodss/yaml v1.0.0 ## explicit github.com/ghodss/yaml +# github.com/go-ini/ini v1.67.0 +## explicit +github.com/go-ini/ini # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log @@ -323,11 +326,11 @@ github.com/google/gofuzz/bytesource # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.1 +# github.com/googleapis/enterprise-certificate-proxy v0.2.3 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.7.0 +# github.com/googleapis/gax-go/v2 v2.7.1 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -344,7 +347,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/imdario/mergo v0.3.12 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/inconshreveable/mousetrap v1.0.1 +# 
github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap # github.com/jmespath/go-jmespath v0.4.0 @@ -411,7 +414,7 @@ github.com/onsi/gomega/types # github.com/open-policy-agent/cert-controller v0.8.0 ## explicit; go 1.20 github.com/open-policy-agent/cert-controller/pkg/rotator -# github.com/open-policy-agent/frameworks/constraint v0.0.0-20230606213221-6ccacf85c2c5 +# github.com/open-policy-agent/frameworks/constraint v0.0.0-20230712214810-96753a21c26f ## explicit; go 1.18 github.com/open-policy-agent/frameworks/constraint/deploy github.com/open-policy-agent/frameworks/constraint/pkg/apis @@ -437,7 +440,7 @@ github.com/open-policy-agent/frameworks/constraint/pkg/instrumentation github.com/open-policy-agent/frameworks/constraint/pkg/regorewriter github.com/open-policy-agent/frameworks/constraint/pkg/schema github.com/open-policy-agent/frameworks/constraint/pkg/types -# github.com/open-policy-agent/opa v0.51.0 +# github.com/open-policy-agent/opa v0.54.0 ## explicit; go 1.18 github.com/open-policy-agent/opa/ast github.com/open-policy-agent/opa/ast/internal/scanner @@ -445,15 +448,19 @@ github.com/open-policy-agent/opa/ast/internal/tokens github.com/open-policy-agent/opa/ast/location github.com/open-policy-agent/opa/bundle github.com/open-policy-agent/opa/capabilities +github.com/open-policy-agent/opa/config github.com/open-policy-agent/opa/format +github.com/open-policy-agent/opa/hooks github.com/open-policy-agent/opa/internal/bundle github.com/open-policy-agent/opa/internal/cidr/merge github.com/open-policy-agent/opa/internal/compiler/wasm github.com/open-policy-agent/opa/internal/compiler/wasm/opa +github.com/open-policy-agent/opa/internal/config github.com/open-policy-agent/opa/internal/debug github.com/open-policy-agent/opa/internal/deepcopy github.com/open-policy-agent/opa/internal/edittree github.com/open-policy-agent/opa/internal/edittree/bitvector +github.com/open-policy-agent/opa/internal/errors github.com/open-policy-agent/opa/internal/file/archive github.com/open-policy-agent/opa/internal/file/url github.com/open-policy-agent/opa/internal/future @@ -480,8 +487,10 @@ github.com/open-policy-agent/opa/internal/providers/aws/crypto github.com/open-policy-agent/opa/internal/providers/aws/v4 github.com/open-policy-agent/opa/internal/ref github.com/open-policy-agent/opa/internal/rego/opa +github.com/open-policy-agent/opa/internal/runtime/init github.com/open-policy-agent/opa/internal/semver github.com/open-policy-agent/opa/internal/strings +github.com/open-policy-agent/opa/internal/strvals github.com/open-policy-agent/opa/internal/uuid github.com/open-policy-agent/opa/internal/version github.com/open-policy-agent/opa/internal/wasm/constant @@ -495,8 +504,12 @@ github.com/open-policy-agent/opa/internal/wasm/util github.com/open-policy-agent/opa/ir github.com/open-policy-agent/opa/keys github.com/open-policy-agent/opa/loader +github.com/open-policy-agent/opa/loader/extension github.com/open-policy-agent/opa/loader/filter +github.com/open-policy-agent/opa/logging github.com/open-policy-agent/opa/metrics +github.com/open-policy-agent/opa/plugins +github.com/open-policy-agent/opa/plugins/rest github.com/open-policy-agent/opa/rego github.com/open-policy-agent/opa/resolver github.com/open-policy-agent/opa/resolver/wasm @@ -526,7 +539,7 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.15.1 +# github.com/prometheus/client_golang v1.16.0 ## 
explicit; go 1.17 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors @@ -540,8 +553,8 @@ github.com/prometheus/client_model/go github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.9.0 -## explicit; go 1.18 +# github.com/prometheus/procfs v0.10.1 +## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -556,10 +569,10 @@ github.com/prometheus/statsd_exporter/pkg/mapper/fsm # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spf13/cobra v1.6.1 +# github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 @@ -568,8 +581,8 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.8.2 +## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require # github.com/tchap/go-patricia/v2 v2.3.1 @@ -581,7 +594,7 @@ github.com/xeipuuv/gojsonpointer # github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 ## explicit github.com/xeipuuv/gojsonreference -# github.com/yashtewari/glob-intersection v0.1.0 +# github.com/yashtewari/glob-intersection v0.2.0 ## explicit; go 1.17 github.com/yashtewari/glob-intersection # go.opencensus.io v0.24.0 @@ -685,7 +698,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # golang.org/x/mod v0.10.0 ## explicit; go 1.17 -# golang.org/x/net v0.10.0 +# golang.org/x/net v0.11.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -698,7 +711,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.5.0 +# golang.org/x/oauth2 v0.7.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -711,7 +724,7 @@ golang.org/x/oauth2/jwt ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.8.0 +# golang.org/x/sys v0.9.0 ## explicit; go 1.17 golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader @@ -719,10 +732,10 @@ golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.8.0 +# golang.org/x/term v0.9.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.9.0 +# golang.org/x/text v0.10.0 ## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -751,22 +764,21 @@ golang.org/x/time/rate # gomodules.xyz/jsonpatch/v2 v2.3.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.108.0 +# google.golang.org/api v0.114.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/impersonate google.golang.org/api/internal/third_party/uritemplates google.golang.org/api/iterator google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/support/bundler -google.golang.org/api/transport/cert google.golang.org/api/transport/grpc 
google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -781,7 +793,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa +# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -798,7 +810,7 @@ google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.54.0 +# google.golang.org/grpc v1.56.1 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes