diff --git a/.ko.yaml b/.ko.yaml index fe71c7d1ca..a50c850f1f 100644 --- a/.ko.yaml +++ b/.ko.yaml @@ -1,7 +1,30 @@ baseImageOverrides: - github.com/google/ko: golang:1.19 + github.com/google/ko: cgr.dev/chainguard/go builds: - id: ko ldflags: - "{{ .Env.LDFLAGS }}" + +verification: + # Override the default of "warn" to "deny" + noMatchPolicy: deny + policies: + # Expand the default base image policy (covers static) to include + # all Chainguard images (namely Go, see above). + - data: | + apiVersion: policy.sigstore.dev/v1beta1 + kind: ClusterImagePolicy + metadata: + name: chainguard-images + spec: + images: + - glob: cgr.dev/chainguard/** + authorities: + - keyless: + url: https://fulcio.sigstore.dev + identities: + - issuer: https://token.actions.githubusercontent.com + subject: https://github.com/chainguard-images/images/.github/workflows/release.yaml@refs/heads/main + ctlog: + url: https://rekor.sigstore.dev diff --git a/go.mod b/go.mod index af4731a5ad..b633f32830 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,38 @@ module github.com/google/ko go 1.18 require ( - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 - github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64 + github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206 github.com/containerd/stargz-snapshotter/estargz v0.13.0 github.com/docker/docker v20.10.22+incompatible github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b github.com/google/go-cmp v0.5.9 - github.com/google/go-containerregistry v0.12.1 + github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757 github.com/opencontainers/image-spec v1.1.0-rc2 github.com/sigstore/cosign v1.13.1 + github.com/sigstore/policy-controller v0.5.2 github.com/spf13/cobra 
v1.6.1 github.com/spf13/viper v1.14.0 go.uber.org/automaxprocs v1.5.1 golang.org/x/sync v0.1.0 golang.org/x/tools v0.4.0 gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.26.0 k8s.io/apimachinery v0.26.0 + knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7 sigs.k8s.io/kind v0.17.0 + sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect + cloud.google.com/go/compute v1.13.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect + contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect + cuelang.org/go v0.4.3 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect + github.com/Azure/azure-sdk-for-go v67.1.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.28 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect @@ -37,31 +45,60 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v1.1.0 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/OneOfOne/xxhash v1.2.8 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/agnivade/levenshtein v1.1.1 // indirect github.com/alessio/shellescape v1.4.1 // indirect + github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect + github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect + github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect + github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect + github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect + github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect + github.com/alibabacloud-go/openapi-util v0.0.11 // 
indirect + github.com/alibabacloud-go/tea v1.1.20 // indirect + github.com/alibabacloud-go/tea-utils v1.4.5 // indirect + github.com/alibabacloud-go/tea-xml v1.1.2 // indirect + github.com/aliyun/credentials-go v1.2.4 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go-v2 v1.16.16 // indirect - github.com/aws/aws-sdk-go-v2/config v1.17.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect - github.com/aws/smithy-go v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.17.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.4 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc 
v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect + github.com/aws/smithy-go v1.13.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blendle/zapdriver v1.3.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/clbanning/mxj/v2 v2.5.6 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v20.10.20+incompatible // indirect + github.com/docker/cli v20.10.21+incompatible // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-chi/chi v4.1.2+incompatible // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect @@ -73,54 +110,120 @@ require ( github.com/go-openapi/strfmt v0.21.3 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.0 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + 
github.com/go-playground/validator/v10 v10.11.1 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.1.4 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757 // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757 // indirect + github.com/google/go-github/v45 v45.2.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/in-toto/in-toto-golang v0.5.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.15.12 // indirect - github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/letsencrypt/boulder 
v0.0.0-20221109233200-85aa52084eaf // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect + github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/open-policy-agent/opa v0.45.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.13.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/statsd_exporter v0.22.8 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2 // indirect - github.com/sigstore/sigstore v1.4.4 // indirect + github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/sigstore/rekor v1.0.1 // indirect + github.com/sigstore/sigstore v1.5.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect 
github.com/spf13/afero v1.9.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.4.1 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 // indirect + github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/tjfoc/gmsm v1.4.1 // indirect + github.com/transparency-dev/merkle v0.0.1 // indirect github.com/vbatts/tar-split v0.11.2 // indirect - go.mongodb.org/mongo-driver v1.10.2 // indirect - golang.org/x/crypto v0.1.0 // indirect + github.com/xanzy/go-gitlab v0.73.1 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/yashtewari/glob-intersection v0.1.0 // indirect + go.mongodb.org/mongo-driver v1.10.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.4.0 // indirect + golang.org/x/exp v0.0.0-20221026153819-32f3d567a233 // indirect golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect - golang.org/x/oauth2 v0.1.0 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/oauth2 v0.3.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/term v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect + golang.org/x/time v0.3.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/api v0.104.0 // indirect google.golang.org/appengine v1.6.7 // 
indirect - google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect - google.golang.org/grpc v1.50.1 // indirect + google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect + google.golang.org/grpc v1.51.0 // indirect google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/client-go v0.25.4 // indirect + k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/release-utils v0.7.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 75cf4b3ee8..11bdab80ab 100644 --- a/go.sum +++ b/go.sum @@ -23,10 +23,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= +cloud.google.com/go/compute v1.13.0/go.mod 
h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -39,9 +39,17 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI= +contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= +contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= +contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= +cuelang.org/go v0.4.3 h1:W3oBBjDTm7+IZfCKZAmC8uDG0eYfJL4Pp/xbbCMKaVo= +cuelang.org/go v0.4.3/go.mod h1:7805vR9H+VoBNdWFdI7jyDR3QLUPp4+naHfbcgp55HI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= 
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= +github.com/Azure/azure-sdk-for-go v67.1.0+incompatible h1:oziYcaopbnIKfM69DL05wXdypiqfrUKdxUKrKpynJTw= +github.com/Azure/azure-sdk-for-go v67.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -61,6 +69,8 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= @@ -72,89 +82,188 @@ github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/OneOfOne/xxhash 
v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= +github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= +github.com/alibabacloud-go/alibabacloud-gateway-spi 
v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= +github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= +github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= +github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= +github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU= +github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= +github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= +github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 h1:NqugFkGxx1TXSh/pBcU00Y6bljgDPaFdh5MUSeJ7e50= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= +github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.11 h1:iYnqOPR5hyEEnNZmebGyRMkkEJRWUEjDiiaOHZ5aNhA= +github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod 
h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.20 h1:wFK4xEbvGYMtzTyHhIju9D7ecWxvSUdoLO6y4vDLFik= +github.com/alibabacloud-go/tea v1.1.20/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= +github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M= +github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyun/credentials-go v1.2.4 h1:qu8c21BCvbaPJArEcsSk7GbSdxYFiACCjYzkEKCoeLA= +github.com/aliyun/credentials-go v1.2.4/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0ywIy5lNzDz6O1vw= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= 
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250= -github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= -github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= -github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA= -github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE= -github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= -github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc= -github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM= -github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= -github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 h1:W9vzPbvX7rOa/FacbQIDfnNrwxHkn5O+DdfmiIS4cHc= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5/go.mod h1:vk2+DbeZQFXznxJZSMnYrfnCHYxg4oT4Mdh59wSCkw4= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.4.1/go.mod h1:eD5Eo4drVP2FLTw0G+SMIPWNWvQRGGTtIZR2XeAagoA= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 h1:Y8dpvUxU4JecYktR5oNFEW+HmUWlA1Oh7mboTVyQWLg= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5/go.mod h1:gW979HGZOrhGvwjAS6VRgav6M9AYH9Kbey6y3GfF/EA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod 
h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= -github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA= -github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 h1:p2I85zYI9z5/c/3Q0LiO3RtNXcmXHTtJfml/hV16zNg= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04/go.mod h1:Z+bXnIbhKJYSvxNwsNnwde7pDKxuqlEZCbUBoTwAqf0= +github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw= +github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8= +github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/config v1.17.10/go.mod h1:/4np+UiJJKpWHN7Q+LZvqXYgyjgeXm5+lLfDI6TPZao= +github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= +github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= 
+github.com/aws/aws-sdk-go-v2/credentials v1.12.23/go.mod h1:0awX9iRr/+UO7OwRQFpV1hNtXxOVuehpjVEzrIAYNcA= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w= +github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20 h1:nJnXfQggNZdrWz/0cm2ZGyddGK+FqTiN4QJGanzKZoY= +github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20/go.mod h1:kEVGiy2tACP0cegVqx4MrjsgQMSgrtgRq1fSa+Ix6F0= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19 h1:AwWP9a5n9a6kcgpTOfZ2/AeHKdq1Cb+HwgWQ1ADqiZM= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19/go.mod 
h1:j3mVo8gEwXjgzf9PfORBnYUUQnnjkd4OY6y5JmubV94= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E= +github.com/aws/aws-sdk-go-v2/service/kms v1.19.2 h1:pgOVfu7E6zBddKGks4TvL4YuFsL/oTpiWDIzs4WPLjY= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.1/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= +github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64 h1:J+6PUCOmCU9A2iZDGsTGxdycxybJMp+fbFEMWWsQUgg= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64/go.mod 
h1:oqbjAk8VeItfKctyahGuAyU61z4d0Fi1gHmlWjHWsMM= +github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= +github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 
h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 h1:9Qh4lJ/KMr5iS1zfZ8I97+3MDpiKjl+0lZVUNBhdvRs= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08/go.mod h1:MAuu1uDJNOS3T3ui0qmKdPUwm59+bO19BbTph2wZafE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206 h1:lG6Usi/kX/JBZzGz1H+nV+KwM97vThQeKunCbS6PutU= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206/go.mod h1:1UmFRnmMnVsHwD+ZntmLkoVBB1ZLa6V+XXEbF6hZCxU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= +github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g= +github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= 
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= +github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/danieljoos/wincred v1.0.2/go.mod 
h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v20.10.20+incompatible h1:lWQbHSHUFs7KraSN2jOJK7zbMS2jNCHI4mt4xUFUVQ4= -github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= +github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.22+incompatible h1:6jX4yB+NtcbldT90k7vBSaWJDB3i+zkVJT9BEK8kQkk= github.com/docker/docker v20.10.22+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -162,27 +271,55 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 h1:PRxIJD8XjimM5aTknUK9w6DHLDox2r2M3DI4i2pnd3w= github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936/go.mod h1:ttYvX5qlB+mlV1okblJqcSMtR4c52UKxDiX9GRBS8+Q= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.6.15 h1:XbpwxmuOPrdES97FrSfpyy67SSCV/wBIKXqgJzh6hNw= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897 h1:E52jfcE64UG42SwLmrW0QByONfGynWuzBvm86BoB9z8= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= 
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -222,9 +359,18 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-openapi/validate v0.21.0/go.mod 
h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b h1:0pOrjn0UzTcHdhDVdxrH8LwM7QLnAp8qiUtwXM04JEE= github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b/go.mod h1:hGGmX3bRUkYkc9aKA6mkUxi6d+f1GmZF1je0FlVTgwU= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -251,6 +397,11 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod 
h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -258,9 +409,13 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -286,8 +441,16 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY= +github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -296,15 +459,26 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8= -github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k= +github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757 h1:pmegaRhUKXxTUrNhRz96PiBk4Ihfi6CoejIcFSWMTmg= +github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757 h1:1qKTXnWK6DsOFFfjakWJKMlpfAwmykw6Jjk9SLBsZmI= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757/go.mod h1:7QLaBZxN+nMCx82XO5R7qPHq0m61liEg8yca68zymHo= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757 h1:FsE9anmDCfnvZBx/PxdW8JDVJrAtx8zkWkQyHoxA3Jc= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757/go.mod h1:T6IXbpoY0IGBh0cyHZsIi/zmMBI5yInMr7ob1b+SCz0= +github.com/google/go-github/v45 v45.2.0 
h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= +github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -322,24 +496,68 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= +github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 h1:GFmzYtwUMi1S2mjLxfrJ/CZ9gWDG+zeLtZByg/QEBkk= +github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9/go.mod h1:vywkS3p2SgNmPL7oAWqU5PiiknzRMp+ol3a19jfY2PQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
+github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM= +github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= +github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod 
h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= +github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= +github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= +github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -347,12 +565,25 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= @@ -360,15 +591,25 @@ github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kE github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be h1:Cx2bsfM27RBF/45zP1xhFN9FHDxo40LdYdE5L+GWVTw= -github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be/go.mod h1:j/WMsOEcTSfy6VR1PkiIo20qH1V9iRRzb7ishoKkN0g= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod 
h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= +github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -378,47 +619,83 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod 
h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= +github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= +github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto= +github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs= +github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod 
h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -427,49 +704,115 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= 
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod 
h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0= +github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/protocolbuffers/txtpbfmt v0.0.0-20201118171849-f6a6b3f636fc h1:gSVONBi2HWMFXCa9jFdYvYk7IwW/mTLxWOF7rXS4LO0= +github.com/qur/ar v0.0.0-20130629153254-282534b91770/go.mod h1:SjlYv2m9lpV0UW6K7lDqVJwEIIvSjaHbGk7nIfY8Hxw= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/sassoftware/go-rpmutils v0.1.1/go.mod h1:euhXULoBpvAxqrBHEyJS4Tsu3hHxUmQWNymxoJbzgUY= +github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 h1:sUNzanSKA9z/h8xXl+ZJoxIYZL0Qx306MmxqRrvUgr0= +github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74/go.mod h1:YlB8wFIZmFLZ1JllNBfSURzz52fBxbliNgYALk1UDmk= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= +github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sigstore/cosign v1.13.1 h1:+5oF8jisEcDw2TuXxCADC1u5//HfdnJhGbpv9Isiwu4= github.com/sigstore/cosign v1.13.1/go.mod 
h1:PlfJODkovUOKsLrGI7Su57Ie/Eb/Ks7hRHw3tn5hQS4= -github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2 h1:LD8LcwygdD2DxaINWwbkaUEBAknr205wmn66/N05s7c= -github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2/go.mod h1:C/jZ3EZywl/Kew48fGMWQoh+1LxOMk0BkP3DHmtB+8M= -github.com/sigstore/sigstore v1.4.4 h1:lVsnNTY8DUmy2hnwCPtimWfEqv+DIwleORkF8KyFsMs= -github.com/sigstore/sigstore v1.4.4/go.mod h1:wIqu9sN72+pds31MMu89GchxXHy17k+VZWc+HY1ZXMA= +github.com/sigstore/policy-controller v0.5.2 h1:0mJaYN71RDDpf1WoSEwXiuN6DOAFaC95ms3prHcVT3s= +github.com/sigstore/policy-controller v0.5.2/go.mod h1:yJtROa8mbBAXAov6XM36B3IlP0MCA7x4Rgp4W99VjuA= +github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c= +github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g= +github.com/sigstore/sigstore v1.5.0 h1:NqstQ6SwwhQsp6Ll0wgk/d9g5MlfmEppo14aquUjJ/8= +github.com/sigstore/sigstore v1.5.0/go.mod h1:fRAaZ9xXh7ZQ0GJqZdpmNJ3pemuHBu2PgIAngmzIFSI= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -477,53 +820,107 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= 
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 h1:iGnD/q9160NWqKZZ5vY4p0dMiYMRknzctfSkqA4nBDw= +github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613/go.mod h1:g6AnIpDSYMcphz193otpSIzN+11Rs+AAIIC6rm1enug= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 h1:1i/Afw3rmaR1gF3sfVkG2X6ldkikQwA9zY380LrR5YI= github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E= +github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A= +github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/xanzy/go-gitlab v0.73.1 h1:UMagqUZLJdjss1SovIC+kJCH4k2AZWXl58gJd38Y/hI= +github.com/xanzy/go-gitlab v0.73.1/go.mod h1:d/a0vswScO7Agg1CZNz15Ic6SSvBG9vfw8egL99t4kA= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zalando/go-keyring v0.1.0/go.mod h1:RaxNwUITJaHVdQ0VC7pELPZ3tOWn13nr0gZMZEhpVU0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= -go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mongodb.org/mongo-driver v1.10.3 h1:XDQEvmh6z1EUsXuIkXE9TaVeqHw6SwS1uf93jFs0HBA= +go.mongodb.org/mongo-driver v1.10.3/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io 
v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -531,8 +928,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= 
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -543,6 +940,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221026153819-32f3d567a233 h1:9bNbSKT4RPLEzne0Xh1v3NaNecsa1DKjkOuTbY6V9rI= +golang.org/x/exp v0.0.0-20221026153819-32f3d567a233/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -571,16 +970,21 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -596,18 +1000,25 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200930145003-4acb6c075d10/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -617,8 +1028,10 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -628,14 +1041,18 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -655,6 +1072,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -664,9 +1082,12 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -675,15 +1096,26 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -706,13 +1138,17 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -745,6 +1181,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -767,6 +1204,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -780,12 +1220,15 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= +google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -817,8 +1260,10 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -830,10 +1275,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc= +google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -845,11 +1292,14 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -862,24 +1312,37 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -889,6 +1352,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= @@ -899,16 +1363,30 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
-k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= +k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= +k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54 h1:hWRbsoRWt44OEBnYUd4ceLy4ofBoh+p9vauWp/I5Gdg= +k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8= +k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7 h1:YaO4KgF1Kp8BTi1hxMXDRnvsxCFq/wpotOD3jzrHmzw= +knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7/go.mod h1:IeUSNPPUpQnM35SjpnfCx0w5/V2RpEc+nmke6oPwpD0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM= sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk= +sigs.k8s.io/release-utils v0.7.3 h1:6pS8x6c5RmdUgR9qcg1LO6hjUzuE4Yo9TGZ3DemrZdM= +sigs.k8s.io/release-utils v0.7.3/go.mod h1:n0mVez/1PZYZaZUTJmxewxH3RJ/Lf7JUDh7TG1CASOE= +sigs.k8s.io/structured-merge-diff/v4 
v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/pkg/commands/config.go b/pkg/commands/config.go index f327aee974..e0256a8f91 100644 --- a/pkg/commands/config.go +++ b/pkg/commands/config.go @@ -77,6 +77,14 @@ func getBaseImage(bo *options.BuildOptions) build.GetBase { if err != nil { return nil, err } + + if bo.Verifier != nil { + base := ref.Context().Digest(desc.Digest.String()) + if err := bo.Verifier.Verify(ctx, base, keychain); err != nil { + return nil, err + } + } + if desc.MediaType.IsIndex() { return desc.ImageIndex() } diff --git a/pkg/commands/options/build.go b/pkg/commands/options/build.go index 635958f8b4..da4ae2e7f0 100644 --- a/pkg/commands/options/build.go +++ b/pkg/commands/options/build.go @@ -17,8 +17,10 @@ limitations under the License. package options import ( + "context" "errors" "fmt" + "log" "os" "path/filepath" @@ -28,11 +30,32 @@ import ( "golang.org/x/tools/go/packages" "github.com/google/ko/pkg/build" + "github.com/google/ko/pkg/policy" ) const ( // configDefaultBaseImage is the default base image if not specified in .ko.yaml. 
configDefaultBaseImage = "cgr.dev/chainguard/static:latest" + + // configDefaultBaseImagePolicy is the default base image policy if not + // specified in .ko.yaml + configDefaultBaseImagePolicy = ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: ko-default-base-image-policy +spec: + images: + - glob: cgr.dev/chainguard/static* + authorities: + - keyless: + url: https://fulcio.sigstore.dev + identities: + - issuer: https://token.actions.githubusercontent.com + subject: https://github.com/chainguard-images/images/.github/workflows/release.yaml@refs/heads/main + ctlog: + url: https://rekor.sigstore.dev +` ) // BuildOptions represents options for the ko builder. @@ -71,6 +94,9 @@ type BuildOptions struct { // BuildConfigs stores the per-image build config from `.ko.yaml`. BuildConfigs map[string]build.Config + + // Verifier is used to check that base images satisfy configured policies. + Verifier policy.Verifier } func AddBuildOptions(cmd *cobra.Command, bo *BuildOptions) { @@ -160,7 +186,7 @@ func (bo *BuildOptions) LoadConfig() error { if len(bo.BuildConfigs) == 0 { var builds []build.Config if err := v.UnmarshalKey("builds", &builds); err != nil { - return fmt.Errorf("configuration section 'builds' cannot be parsed") + return fmt.Errorf("configuration section 'builds' cannot be parsed: %w", err) } buildConfigs, err := createBuildConfigMap(bo.WorkingDirectory, builds) if err != nil { @@ -169,9 +195,33 @@ func (bo *BuildOptions) LoadConfig() error { bo.BuildConfigs = buildConfigs } + vfy := policy.Verification{} + if err := v.UnmarshalKey("verification", &vfy); err != nil { + return fmt.Errorf("configuration section 'verification' cannot be parsed: %w", err) + } + verificationDefaults(&vfy) + vfr, err := policy.Compile(context.Background(), vfy, func(s string, i ...interface{}) { + log.Printf("WARNING: %s", fmt.Sprintf(s, i...)) + }) + if err != nil { + return fmt.Errorf("compiling verification: %w", err) + } + bo.Verifier = vfr + 
return nil } +func verificationDefaults(vfy *policy.Verification) { + if vfy.NoMatchPolicy == "" { + vfy.NoMatchPolicy = "warn" + } + if vfy.Policies == nil { + vfy.Policies = &[]policy.PolicyData{{ + Data: configDefaultBaseImagePolicy, + }} + } +} + func createBuildConfigMap(workingDirectory string, configs []build.Config) (map[string]build.Config, error) { buildConfigsByImportPath := make(map[string]build.Config) for i, config := range configs { diff --git a/pkg/policy/parse.go b/pkg/policy/parse.go new file mode 100644 index 0000000000..3b2133df9c --- /dev/null +++ b/pkg/policy/parse.go @@ -0,0 +1,114 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package policy + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/sigstore/policy-controller/pkg/apis/policy/v1alpha1" + "github.com/sigstore/policy-controller/pkg/apis/policy/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "sigs.k8s.io/yaml" +) + +// Parse decodes a provided YAML document containing zero or more objects into +// a collection of unstructured.Unstructured objects. 
+func Parse(ctx context.Context, document string) ([]*unstructured.Unstructured, error) { + docs := strings.Split(document, "\n---\n") + + objs := make([]*unstructured.Unstructured, 0, len(docs)) + for i, doc := range docs { + doc = strings.TrimSpace(doc) + if doc == "" { + continue + } + var obj unstructured.Unstructured + if err := yaml.Unmarshal([]byte(doc), &obj); err != nil { + return nil, fmt.Errorf("decoding object[%d]: %w", i, err) + } + if obj.GetAPIVersion() == "" { + return nil, apis.ErrMissingField("apiVersion").ViaIndex(i) + } + if obj.GetName() == "" { + return nil, apis.ErrMissingField("metadata.name").ViaIndex(i) + } + objs = append(objs, &obj) + } + return objs, nil +} + +// ParseClusterImagePolicies returns ClusterImagePolicy objects found in the +// policy document. +func ParseClusterImagePolicies(ctx context.Context, document string) (cips []*v1alpha1.ClusterImagePolicy, warns error, err error) { + if warns, err = Validate(ctx, document); err != nil { + return nil, warns, err + } + + ol, err := Parse(ctx, document) + if err != nil { + return nil, warns, err + } + + cips = make([]*v1alpha1.ClusterImagePolicy, 0) + for _, obj := range ol { + gv, err := schema.ParseGroupVersion(obj.GetAPIVersion()) + if err != nil { + // Practically unstructured.Unstructured won't let this happen. + return nil, warns, fmt.Errorf("error parsing apiVersion of: %w", err) + } + + cip := &v1alpha1.ClusterImagePolicy{} + + switch gv.WithKind(obj.GetKind()).GroupKind() { + case v1beta1.SchemeGroupVersion.WithKind("ClusterImagePolicy").GroupKind(): + v1b1 := &v1beta1.ClusterImagePolicy{} + if err := convert(obj, v1b1); err != nil { + return nil, warns, err + } + if err := cip.ConvertFrom(ctx, v1b1); err != nil { + return nil, warns, err + } + + case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy").GroupKind(): + // This is allowed, but we should convert things. 
+ if err := convert(obj, cip); err != nil { + return nil, warns, err + } + + default: + continue + } + + cips = append(cips, cip) + } + return cips, warns, nil +} + +func convert(from interface{}, to runtime.Object) error { + bs, err := json.Marshal(from) + if err != nil { + return fmt.Errorf("Marshal() = %w", err) + } + if err := json.Unmarshal(bs, to); err != nil { + return fmt.Errorf("Unmarshal() = %w", err) + } + return nil +} diff --git a/pkg/policy/parse_test.go b/pkg/policy/parse_test.go new file mode 100644 index 0000000000..0c439dce78 --- /dev/null +++ b/pkg/policy/parse_test.go @@ -0,0 +1,189 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package policy + +import ( + "context" + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "knative.dev/pkg/apis" +) + +func TestParse(t *testing.T) { + tests := []struct { + name string + doc string + want []*unstructured.Unstructured + wantErr error + }{{ + name: "good single object", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: {} +`, + want: []*unstructured.Unstructured{{ + Object: map[string]interface{}{ + "apiVersion": "policy.sigstore.dev/v1beta1", + "kind": "ClusterImagePolicy", + "metadata": map[string]interface{}{ + "name": "blah", + }, + "spec": map[string]interface{}{}, + }, + }}, + }, { + name: "good multi-object", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: {} +--- +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: foo +spec: {} +--- +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: bar +spec: {} +`, + want: []*unstructured.Unstructured{{ + Object: map[string]interface{}{ + "apiVersion": "policy.sigstore.dev/v1beta1", + "kind": "ClusterImagePolicy", + "metadata": map[string]interface{}{ + "name": "blah", + }, + "spec": map[string]interface{}{}, + }, + }, { + Object: map[string]interface{}{ + "apiVersion": "policy.sigstore.dev/v1beta1", + "kind": "ClusterImagePolicy", + "metadata": map[string]interface{}{ + "name": "foo", + }, + "spec": map[string]interface{}{}, + }, + }, { + Object: map[string]interface{}{ + "apiVersion": "policy.sigstore.dev/v1beta1", + "kind": "ClusterImagePolicy", + "metadata": map[string]interface{}{ + "name": "bar", + }, + "spec": map[string]interface{}{}, + }, + }}, + }, { + name: "bad missing apiVersion", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: {} +--- +# Missing: apiVersion: 
policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: foo +spec: {} +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: bar +spec: {} +`, + wantErr: apis.ErrMissingField("[1].apiVersion"), + }, { + name: "bad missing kind", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: {} +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: foo +spec: {} +--- +apiVersion: policy.sigstore.dev/v1beta1 +# Missing: kind: ClusterImagePolicy +metadata: + name: bar +spec: {} +`, + wantErr: errors.New(`decoding object[2]: error unmarshaling JSON: while decoding JSON: Object 'Kind' is missing in '{"apiVersion":"policy.sigstore.dev/v1beta1","metadata":{"name":"bar"},"spec":{}}'`), + }, { + name: "bad missing apiVersion", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + # Missing: name: blah +sp dec: {} +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: foo +spec: {} +--- +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: bar +spec: {} +`, + wantErr: apis.ErrMissingField("[0].metadata.name"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, gotErr := Parse(context.Background(), test.doc) + + switch { + case (gotErr != nil) != (test.wantErr != nil): + t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr) + case gotErr != nil && gotErr.Error() != test.wantErr.Error(): + t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr) + case gotErr != nil: + return // This was an error test. 
+ } + + if diff := cmp.Diff(got, test.want); diff != "" { + t.Errorf("Parse (-got, +want) = %s", diff) + } + }) + } +} diff --git a/pkg/policy/policy.go b/pkg/policy/policy.go new file mode 100644 index 0000000000..7622973318 --- /dev/null +++ b/pkg/policy/policy.go @@ -0,0 +1,91 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package policy + +import ( + "context" + + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/apis" +) + +type Verification struct { + // NoMatchPolicy specifies the behavior when a base image doesn't match any + // of the listed policies. It allows the values: allow, deny, and warn. + NoMatchPolicy string `yaml:"no-match-policy,omitempty"` + + // Policies specifies a collection of policies to use to cover the base + // images used as part of evaluation. See "policy" below for usage. + Policies *[]PolicyData `yaml:"policies,omitempty"` +} + +// PolicyData contains a set of options for specifying a PolicyData. Exactly +// one of the fields may be specified for each PolicyData entry. +type PolicyData struct { + // Data is a collection of one or more ClusterImagePolicy resources. + Data string `yaml:"data,omitempty"` + + // TODO(mattmoor): Path support + // // Path is a path to a file or directory containing ClusterImagePolicy resources. + // // TODO(mattmoor): How do we want to handle something like -R? 
Perhaps we + // // don't and encourage folks to list each directory individually? + // Path string `yaml:"path,omitempty"` + + // TODO(mattmoor): URL support + // // URL links to a file containing one or more ClusterImagePolicy resources. + // URL string `yaml:"url,omitempty"` +} + +func (v *Verification) Validate(ctx context.Context) (errs *apis.FieldError) { + switch v.NoMatchPolicy { + case "allow", "deny", "warn": + // Good! + case "": + errs = errs.Also(apis.ErrMissingField("noMatchPolicy")) + default: + errs = errs.Also(apis.ErrInvalidValue(v.NoMatchPolicy, "noMatchPolicy")) + } + + if v.Policies == nil { + errs = errs.Also(apis.ErrMissingField("policies")) + } else { + for i, p := range *v.Policies { + errs = errs.Also(p.Validate(ctx).ViaFieldIndex("policies", i)) + } + } + + return errs +} + +func (pd *PolicyData) Validate(ctx context.Context) (errs *apis.FieldError) { + // Check that exactly one of the fields is set. + set := sets.NewString() + if pd.Data != "" { + set.Insert("data") + // TODO(mattmoor): Validate data. + } + // TODO(mattmoor): Check for the other fields as we add them here. + + switch set.Len() { + case 0: + // TODO: Change this to ErrMissingOneOf when we add more fields. + errs = errs.Also(apis.ErrMissingField("data")) + case 1: + // What we want. + default: + errs = errs.Also(apis.ErrMultipleOneOf(set.List()...)) + } + return errs +} diff --git a/pkg/policy/validate.go b/pkg/policy/validate.go new file mode 100644 index 0000000000..62c3ae5971 --- /dev/null +++ b/pkg/policy/validate.go @@ -0,0 +1,112 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package policy + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/sigstore/policy-controller/pkg/apis/policy/v1alpha1" + "github.com/sigstore/policy-controller/pkg/apis/policy/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "knative.dev/pkg/apis" +) + +var ( + // ErrEmptyDocument is the error returned when no document body is + // specified. + ErrEmptyDocument = errors.New("document is required to create policy") + + // ErrUnknownType is the error returned when a type contained in the policy + // is unrecognized. + ErrUnknownType = errors.New("unknown type") +) + +// Validate decodes a provided YAML document containing zero or more objects +// and performs limited validation on them. 
+func Validate(ctx context.Context, document string) (warns error, err error) { + if len(document) == 0 { + return nil, ErrEmptyDocument + } + + uol, err := Parse(ctx, document) + if err != nil { + return nil, err + } + + for i, uo := range uol { + switch uo.GroupVersionKind() { + case v1beta1.SchemeGroupVersion.WithKind("ClusterImagePolicy"): + if warns, err = validate(ctx, uo, &v1beta1.ClusterImagePolicy{}); err != nil { + return + } + + case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy"): + if warns, err = validate(ctx, uo, &v1alpha1.ClusterImagePolicy{}); err != nil { + return + } + + case corev1.SchemeGroupVersion.WithKind("Secret"): + if uo.GetNamespace() != "cosign-system" { + return warns, apis.ErrInvalidValue(uo.GetNamespace(), "metadata.namespace").ViaIndex(i) + } + // Any additional validation worth performing? Should we check the + // schema of the secret matches the expectations of cosigned? + + default: + return warns, fmt.Errorf("%w: %v", ErrUnknownType, uo.GroupVersionKind()) + } + } + return warns, nil +} + +type crd interface { + apis.Validatable + apis.Defaultable +} + +func validate(ctx context.Context, uo *unstructured.Unstructured, v crd) (warns error, err error) { + b, err := json.Marshal(uo) + if err != nil { + return nil, fmt.Errorf("unable to marshal: %w", err) + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.DisallowUnknownFields() + if err := dec.Decode(v); err != nil { + return nil, fmt.Errorf("unable to unmarshal: %w", err) + } + + // Apply defaulting to simulate the defaulting webhook that runs prior + // to validation. + v.SetDefaults(ctx) + + // We can't just return v.Validate(ctx) because of Go's typed nils. + // nolint:revive + if ve := v.Validate(ctx); ve != nil { + // Separate validation warnings from errors so the caller can discern between them. 
+ if warnFE := ve.Filter(apis.WarningLevel); warnFE != nil { + warns = warnFE + } + if errorFE := ve.Filter(apis.ErrorLevel); errorFE != nil { + err = errorFE + } + } + return +} diff --git a/pkg/policy/validate_test.go b/pkg/policy/validate_test.go new file mode 100644 index 0000000000..3172aa9d8d --- /dev/null +++ b/pkg/policy/validate_test.go @@ -0,0 +1,203 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package policy + +import ( + "context" + "errors" + "testing" + + policycontrollerconfig "github.com/sigstore/policy-controller/pkg/config" + "knative.dev/pkg/apis" +) + +func TestValidate(t *testing.T) { + tests := []struct { + name string + doc string + wantWarns error + wantErr error + allowEmptyAuthorities bool + }{{ + name: "good single object", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + images: + - glob: '*' + authorities: + - keyless: + identities: + - issuer: https://issuer.example.com + subject: foo@example.com + url: https://fulcio.sigstore.dev +`, + wantErr: nil, + }, { + name: "good CIP and Secret", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + images: + - glob: '*' + authorities: + - keyless: + identities: + - issuer: https://issuer.example.com + subject: foo@example.com + url: https://fulcio.sigstore.dev +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
foo + namespace: cosign-system +stringData: + foo: bar +`, + wantErr: nil, + }, { + name: "bad secret namespace", + doc: ` +apiVersion: v1 +kind: Secret +metadata: + name: foo + namespace: something-system +stringData: + foo: bar +`, + wantErr: errors.New(`invalid value: something-system: [0].metadata.namespace`), + }, { + name: "bad image policy", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + images: + - glob: '*' + authorities: + - key: {} +`, + wantErr: apis.ErrMissingOneOf("data", "kms", "secretref").ViaField("key").ViaFieldIndex("authorities", 0).ViaField("spec"), + }, { + name: "empty document", + doc: ``, + wantErr: ErrEmptyDocument, + }, { + name: "object missing kind", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +# Missing: kind: ClusterImagePolicy +metadata: + name: blah +spec: {} +`, + wantErr: errors.New(`decoding object[0]: error unmarshaling JSON: while decoding JSON: Object 'Kind' is missing in '{"apiVersion":"policy.sigstore.dev/v1beta1","metadata":{"name":"blah"},"spec":{}}'`), + }, { + name: "unknown field", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + asdf: dfsadf +`, + wantErr: errors.New(`unable to unmarshal: json: unknown field "asdf"`), + }, { + name: "unknown type", + doc: ` +apiVersion: unknown.dev/v1 +kind: OtherPolicy +metadata: + name: blah +spec: {} +`, + wantErr: errors.New(`unknown type: unknown.dev/v1, Kind=OtherPolicy`), + }, { + name: "warning - missing field", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + images: + - glob: '*' + authorities: + - keyless: + url: https://fulcio.sigstore.dev +`, + wantWarns: errors.New("missing field(s): spec.authorities[0].keyless.identities"), + wantErr: nil, + }, + { + name: "admit - missing authorities", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah 
+spec: + images: + - glob: '*' +`, + wantErr: nil, + allowEmptyAuthorities: true, + }, { + name: "deny - missing authorities", + doc: ` +apiVersion: policy.sigstore.dev/v1beta1 +kind: ClusterImagePolicy +metadata: + name: blah +spec: + images: + - glob: '*' +`, + wantErr: errors.New("missing field(s): spec.authorities"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testContext := context.Background() + if test.allowEmptyAuthorities { + testContext = policycontrollerconfig.ToContext(testContext, &policycontrollerconfig.PolicyControllerConfig{FailOnEmptyAuthorities: false}) + } + gotWarns, gotErr := Validate(testContext, test.doc) + if (gotErr != nil) != (test.wantErr != nil) { + t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr) + } + if (gotWarns != nil) != (test.wantWarns != nil) { + t.Fatalf("Parse() = %v, wanted %v", gotWarns, test.wantWarns) + } + if gotErr != nil && gotErr.Error() != test.wantErr.Error() { + t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr) + } + if gotWarns != nil && gotWarns.Error() != test.wantWarns.Error() { + t.Fatalf("Parse() = %v, wanted %v", gotWarns, test.wantWarns) + } + }) + } +} diff --git a/pkg/policy/verifier.go b/pkg/policy/verifier.go new file mode 100644 index 0000000000..4d7a0941f0 --- /dev/null +++ b/pkg/policy/verifier.go @@ -0,0 +1,147 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package policy + +import ( + "context" + "errors" + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + ociremote "github.com/sigstore/cosign/pkg/oci/remote" + "github.com/sigstore/policy-controller/pkg/apis/config" + "github.com/sigstore/policy-controller/pkg/webhook" + webhookcip "github.com/sigstore/policy-controller/pkg/webhook/clusterimagepolicy" + "knative.dev/pkg/apis" +) + +// Verifier is the interface for checking that a given image digest satisfies +// the policies backing this interface. +type Verifier interface { + // Verify checks that the provided digest satisfies the backing policies. + Verify(context.Context, name.Digest, authn.Keychain) error +} + +// WarningWriter is used to surface warning messages in a manner that +// is customizable by callers that's suitable for their execution +// environment. +type WarningWriter func(string, ...interface{}) + +// Compile turns a Verification into an executable Verifier. +// Any compilation errors are returned here. +func Compile(ctx context.Context, v Verification, ww WarningWriter) (Verifier, error) { + // TODO(mattmoor): Validate NoMatchPolicy. + // TODO(mattmoor): Validate Policies. 
+ + ipc, err := gather(context.Background(), v, ww) + if err != nil { + return nil, err + } + + return &impl{ + verification: v, + ipc: ipc, + ww: ww, + }, nil +} + +func gather(ctx context.Context, v Verification, ww WarningWriter) (*config.ImagePolicyConfig, error) { + pol := *v.Policies + ipc := &config.ImagePolicyConfig{ + Policies: make(map[string]webhookcip.ClusterImagePolicy, len(pol)), + } + + for i, p := range pol { + switch { + case p.Data != "": + l, warns, err := ParseClusterImagePolicies(ctx, p.Data) + if err != nil { + return nil, fmt.Errorf("parsing policies: %w", err) + } + if warns != nil { + ww("policy %d: %v", i, warns) + } + + // TODO(mattmoor): Add additional checks for unsupported things, + // like Match, IncludeSpec, etc. + + for _, cip := range l { + cip.SetDefaults(ctx) + if _, ok := ipc.Policies[cip.Name]; ok { + ww("duplicate policy named %q, skipping.", cip.Name) + continue + } + ipc.Policies[cip.Name] = *webhookcip.ConvertClusterImagePolicyV1alpha1ToWebhook(cip) + } + default: + return nil, fmt.Errorf("unsupported policy shape: %v", p) + } + } + + return ipc, nil +} + +type impl struct { + verification Verification + + ipc *config.ImagePolicyConfig + ww WarningWriter +} + +// Check that impl implements Verifier +var _ Verifier = (*impl)(nil) + +// Verify implements Verifier +func (i *impl) Verify(ctx context.Context, d name.Digest, kc authn.Keychain) error { + matches, err := i.ipc.GetMatchingPolicies(d.Name(), "" /* kind */, "" /* apiVersion */, nil /* labels */) + if err != nil { + return err + } + + if len(matches) == 0 { + switch i.verification.NoMatchPolicy { + case "allow": + return nil + case "warn": + i.ww("%s is uncovered by policy", d) + case "deny": + return fmt.Errorf("%s is uncovered by policy", d) + default: + return fmt.Errorf("unsupported noMatchPolicy: %q", i.verification.NoMatchPolicy) + } + } + + for _, p := range matches { + _, errs := webhook.ValidatePolicy(ctx, "" /* namespace */, d, p, + kc, 
ociremote.WithRemoteOptions(remote.WithAuthFromKeychain(kc))) + for _, err := range errs { + var fe *apis.FieldError + if errors.As(err, &fe) { + if warnFE := fe.Filter(apis.WarningLevel); warnFE != nil { + i.ww("%v", warnFE) + } + if errorFE := fe.Filter(apis.ErrorLevel); errorFE != nil { + return errorFE + } + } else { + return err + } + } + } + + return nil +} diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 5ac4a843e1..efedadbea2 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.12.1" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 8631b6d6d2..6e3ee8d6ab 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + ## [0.1.0] (2022-10-26) Initial release of metadata being it's own module. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d34..d4aad9bf39 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore b/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore new file mode 100644 index 0000000000..c435b7ebb6 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore @@ -0,0 +1,17 @@ +# IntelliJ IDEA +.idea +*.iml +.editorconfig + +# VS Code +.vscode + +# OS X +.DS_Store + +# Emacs +*~ +\#*\# + +# Vim +.swp diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml new file mode 100644 index 0000000000..f53103b1a3 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml @@ -0,0 +1,20 @@ +language: go + +go: + - 1.11.x + +go_import_path: contrib.go.opencensus.io/exporter/ocagent + +install: skip + +before_script: + - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any + - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any + - GO111MODULE=on # Depend on go.mod for dependencies + +script: + - go build ./... # Ensure dependency updates don't break build + - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi + - go vet ./... + - go test -v -race $PKGS # Run all the tests with the race detector enabled + - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... 
| grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md new file mode 100644 index 0000000000..0786fdf434 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md new file mode 100644 index 0000000000..3b9e908f59 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md @@ -0,0 +1,61 @@ +# OpenCensus Agent Go Exporter + +[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] + + +This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. +OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from +OpenCensus Library, export them to other backends and possibly push configurations back to +Library. See more details on [OC-Agent Readme][OCAgentReadme]. 
+ +Note: This is an experimental repository and is likely to get backwards-incompatible changes. +Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. + +## Installation + +```bash +$ go get -u contrib.go.opencensus.io/exporter/ocagent +``` + +## Usage + +```go +import ( + "context" + "fmt" + "log" + "time" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/trace" +) + +func Example() { + exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) + if err != nil { + log.Fatalf("Failed to create the agent exporter: %v", err) + } + defer exp.Stop() + + // Now register it as a trace exporter. + trace.RegisterExporter(exp) + + // Then use the OpenCensus tracing library, like we normally would. + ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") + defer span.End() + + for i := 0; i < 10; i++ { + _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) + <-time.After(6 * time.Millisecond) + iSpan.End() + } +} +``` + +[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto +[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent +[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master +[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent + diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go new file mode 100644 index 0000000000..297e44b6e7 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "math/rand" + "time" +) + +var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) + +// retries function fn upto n times, if fn returns an error lest it returns nil early. +// It applies exponential backoff in units of (1< 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + traceExporter, err := traceSvcClient.Export(ctx) + if err != nil { + return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) + } + + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := traceExporter.Send(firstTraceMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + ae.mu.Lock() + ae.traceExporter = traceExporter + ae.mu.Unlock() + + // Initiate the config service by sending over node identifier info. + configStream, err := traceSvcClient.Config(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) + } + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} + if err := configStream.Send(firstCfgMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + // In the background, handle trace configurations that are beamed down + // by the agent, but also reply to it with the applied configuration. 
+ go ae.handleConfigStreaming(configStream) + + return nil +} + +func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { + metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) + metricsExporter, err := metricsSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) + } + // Initiate the metrics service by sending over the first message just containing the Node and Resource. + firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := metricsExporter.Send(firstMetricsMessage); err != nil { + return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) + } + + ae.mu.Lock() + ae.metricsExporter = metricsExporter + ae.mu.Unlock() + + // With that we are good to go and can start sending metrics + return nil +} + +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + var dialOpts []grpc.DialOption + if ae.clientTransportCredentials != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) + } else if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + if ae.compressor != "" { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) + } + dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + if len(ae.grpcDialOptions) != 0 { + dialOpts = append(dialOpts, ae.grpcDialOptions...) + } + + ctx := context.Background() + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + return grpc.DialContext(ctx, addr, dialOpts...) 
+} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + // Note: We haven't yet implemented configuration sending so we + // should NOT be changing connection states within this function for now. + for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. + return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.RLock() + cc := ae.grpcClientConn + started := ae.started + stopped := ae.stopped + ae.mu.RUnlock() + + if !started { + return errNotStarted + } + if stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. 
+ var err error + if cc != nil { + err = cc.Close() + } + + // At this point we can change the state variables: started and stopped + ae.mu.Lock() + ae.started = false + ae.stopped = true + ae.mu.Unlock() + close(ae.stopCh) + + // Ensure that the backgroundConnector returns + <-ae.backgroundConnectionDoneCh + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, 1) +} + +func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { + if batch == nil || len(batch.Spans) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.traceExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. + // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.traceExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func (ae *Exporter) ExportView(vd *view.Data) { + if vd == nil { + return + } + _ = ae.viewDataBundler.Add(vd, 1) +} + +// ExportMetricsServiceRequest sends proto metrics with the metrics service client. 
+func (ae *Exporter) ExportMetricsServiceRequest(batch *agentmetricspb.ExportMetricsServiceRequest) error { + if batch == nil || len(batch.Metrics) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportMetricsServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.metricsExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. + // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.metricsExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func ocSpanDataToPbSpans(sdl []*trace.SpanData, spanConfig SpanConfig) []*tracepb.Span { + if len(sdl) == 0 { + return nil + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd, spanConfig)) + } + } + return protoSpans +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoSpans := ocSpanDataToPbSpans(sdl, ae.spanConfig) + if len(protoSpans) == 0 { + return + } + ae.senderMu.Lock() + err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + Resource: ae.resource, + }) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func ocViewDataToPbMetrics(vdl []*view.Data, metricNamePrefix string) []*metricspb.Metric { + if len(vdl) == 0 { + return nil + } + metrics := 
make([]*metricspb.Metric, 0, len(vdl)) + for _, vd := range vdl { + if vd != nil { + vmetric, err := viewDataToMetric(vd, metricNamePrefix) + // TODO: (@odeke-em) somehow report this error, if it is non-nil. + if err == nil && vmetric != nil { + metrics = append(metrics, vmetric) + } + } + } + return metrics +} + +func (ae *Exporter) uploadViewData(vdl []*view.Data) { + protoMetrics := ocViewDataToPbMetrics(vdl, ae.metricNamePerfix) + if len(protoMetrics) == 0 { + return + } + req := &agentmetricspb.ExportMetricsServiceRequest{ + Metrics: protoMetrics, + Resource: ae.resource, + // TODO:(@odeke-em) + // a) Figure out how to derive a Node from the environment + // or better letting users of the exporter configure it. + } + ae.ExportMetricsServiceRequest(req) +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() + ae.viewDataBundler.Flush() +} + +func resourceProtoFromEnv() *resourcepb.Resource { + rs, _ := resource.FromEnv(context.Background()) + if rs == nil { + return nil + } + return resourceToResourcePb(rs) +} + +func resourceToResourcePb(rs *resource.Resource) *resourcepb.Resource { + rprs := &resourcepb.Resource{ + Type: rs.Type, + } + if rs.Labels != nil { + rprs.Labels = make(map[string]string) + for k, v := range rs.Labels { + rprs.Labels[k] = v + } + } + return rprs +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 0000000000..148a564575 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,206 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "go.opencensus.io/resource" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type resourceDetector resource.Detector + +var _ ExporterOption = (*resourceDetector)(nil) + +func (rd resourceDetector) withExporter(e *Exporter) { + e.resourceDetector = resource.Detector(rd) +} + +// WithResourceDetector allows one to register a resource detector. Resource Detector is used +// to detect resources associated with the application. Detected resource is exported +// along with the metrics. If the detector fails then it panics. +// If a resource detector is not provided then by default it detects from the environment. +func WithResourceDetector(rd resource.Detector) ExporterOption { + return resourceDetector(rd) +} + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. 
+func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. If unset, it will instead try to use +// connect to DefaultAgentHost:DefaultAgentPort +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. +func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} + +type reconnectionPeriod time.Duration + +func (rp reconnectionPeriod) withExporter(e *Exporter) { + e.reconnectionPeriod = time.Duration(rp) +} + +func WithReconnectionPeriod(rp time.Duration) ExporterOption { + return reconnectionPeriod(rp) +} + +type compressorSetter string + +func (c compressorSetter) withExporter(e *Exporter) { + e.compressor = string(c) +} + +// UseCompressor will set the compressor for the gRPC client to use when sending requests. +// It is the responsibility of the caller to ensure that the compressor set has been registered +// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. 
Some +// compressors auto-register on import, such as gzip, which can be registered by calling +// `import _ "google.golang.org/grpc/encoding/gzip"` +func UseCompressor(compressorName string) ExporterOption { + return compressorSetter(compressorName) +} + +type headerSetter map[string]string + +func (h headerSetter) withExporter(e *Exporter) { + e.headers = map[string]string(h) +} + +// WithHeaders will send the provided headers when the gRPC stream connection +// is instantiated +func WithHeaders(headers map[string]string) ExporterOption { + return headerSetter(headers) +} + +type clientCredentials struct { + credentials.TransportCredentials +} + +var _ ExporterOption = (*clientCredentials)(nil) + +// WithTLSCredentials allows the connection to use TLS credentials +// when talking to the server. It takes in grpc.TransportCredentials instead +// of say a Certificate file or a tls.Certificate, because the retrieving +// these credentials can be done in many ways e.g. plain file, in code tls.Config +// or by certificate rotation, so it is up to the caller to decide what to use. +func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { + return &clientCredentials{TransportCredentials: creds} +} + +func (cc *clientCredentials) withExporter(e *Exporter) { + e.clientTransportCredentials = cc.TransportCredentials +} + +type grpcDialOptions []grpc.DialOption + +var _ ExporterOption = (*grpcDialOptions)(nil) + +// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts +// with some other configuration the GRPC specified via the agent the ones here will +// take preference since they are set last. 
+func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption { + return grpcDialOptions(opts) +} + +func (opts grpcDialOptions) withExporter(e *Exporter) { + e.grpcDialOptions = opts +} + +type metricNamePrefixSetter string + +var _ ExporterOption = (*metricNamePrefixSetter)(nil) + +func (p metricNamePrefixSetter) withExporter(e *Exporter) { + e.metricNamePerfix = string(p) +} + +// WithMetricNamePrefix provides an option for the caller to add a prefix to metric names. +func WithMetricNamePrefix(prefix string) ExporterOption { + return metricNamePrefixSetter(prefix) +} + +type dataBundlerOptions struct { + delay time.Duration + count int +} + +var _ ExporterOption = (*dataBundlerOptions)(nil) + +func (b dataBundlerOptions) withExporter(e *Exporter) { + if b.delay > 0 { + e.viewDataDelay = b.delay + } + if b.count > 0 { + e.viewDataBundleCount = b.count + } +} + +// WithDataBundlerOptions provides an option for the caller to configure the metrics data bundler. +func WithDataBundlerOptions(delay time.Duration, count int) ExporterOption { + return dataBundlerOptions{delay, count} +} + +func (spanConfig SpanConfig) withExporter(e *Exporter) { + e.spanConfig = spanConfig +} + +var _ ExporterOption = (*SpanConfig)(nil) + +// WithSpanConfig allows one to set the AnnotationEventsPerSpan and MessageEventsPerSpan +func WithSpanConfig(spanConfig SpanConfig) ExporterOption { + return spanConfig +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go new file mode 100644 index 0000000000..8d3d60b1d8 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go @@ -0,0 +1,25 @@ +package ocagent + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 +) + +type SpanConfig struct { + AnnotationEventsPerSpan int + MessageEventsPerSpan int +} + +func (spanConfig SpanConfig) GetAnnotationEventsPerSpan() int { + if spanConfig.AnnotationEventsPerSpan 
<= 0 { + return maxAnnotationEventsPerSpan + } + return spanConfig.AnnotationEventsPerSpan +} + +func (spanConfig SpanConfig) GetMessageEventsPerSpan() int { + if spanConfig.MessageEventsPerSpan <= 0 { + return maxMessageEventsPerSpan + } + return spanConfig.MessageEventsPerSpan +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 0000000000..409afe1edb --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,243 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ocagent + +import ( + "math" + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +func ocSpanToProtoSpan(sd *trace.SpanData, spanConfig SpanConfig) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents, spanConfig), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. 
+ ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +// This code is mostly copied from +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 +func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent, spanConfig SpanConfig) *tracepb.Span_TimeEvents { + if len(as) == 0 && len(es) == 0 { + return nil + } + + timeEvents := &tracepb.Span_TimeEvents{} + var annotations, droppedAnnotationsCount int + var messageEvents, droppedMessageEventsCount int + + // Transform annotations + for i, a := range as { + if annotations >= spanConfig.GetAnnotationEventsPerSpan() { + droppedAnnotationsCount = len(as) - i + break + } + annotations++ + 
timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(a.Time), + Value: transformAnnotationToTimeEvent(&a), + }, + ) + } + + // Transform message events + for i, e := range es { + if messageEvents >= spanConfig.GetMessageEventsPerSpan() { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(e.Time), + Value: transformMessageEventToTimeEvent(&e), + }, + ) + } + + // Process dropped counter + timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + + return timeEvents +} + +func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { + return &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: &tracepb.TruncatableString{Value: a.Message}, + Attributes: ocAttributesToProtoAttributes(a.Attributes), + }, + } +} + +func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { + return &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: uint64(e.MessageID), + UncompressedSize: uint64(e.UncompressedByteSize), + CompressedSize: uint64(e.CompressedByteSize), + }, + } +} + +// clip32 clips an int to the range of an int32. 
+func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} + +func timeToTimestamp(t time.Time) *timestamp.Timestamp { + nanoTime := t.UnixNano() + return ×tamp.Timestamp{ + Seconds: nanoTime / 1e9, + Nanos: int32(nanoTime % 1e9), + } +} + +func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindClient: + return tracepb.Span_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SERVER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} + +func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { + if ts == nil { + return nil + } + return &tracepb.Span_Tracestate{ + Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), + } +} + +func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { + protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) + for _, entry := range entries { + protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ + Key: entry.Key, + Value: entry.Value, + }) + } + return protoEntries +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go new file mode 100644 index 0000000000..4516091252 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go @@ -0,0 +1,278 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/golang/protobuf/ptypes/timestamp" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" +) + +var ( + errNilMeasure = errors.New("expecting a non-nil stats.Measure") + errNilView = errors.New("expecting a non-nil view.View") + errNilViewData = errors.New("expecting a non-nil view.Data") +) + +func viewDataToMetric(vd *view.Data, metricNamePrefix string) (*metricspb.Metric, error) { + if vd == nil { + return nil, errNilViewData + } + + descriptor, err := viewToMetricDescriptor(vd.View, metricNamePrefix) + if err != nil { + return nil, err + } + + timeseries, err := viewDataToTimeseries(vd) + if err != nil { + return nil, err + } + + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: timeseries, + } + return metric, nil +} + +func viewToMetricDescriptor(v *view.View, metricNamePrefix string) (*metricspb.MetricDescriptor, error) { + if v == nil { + return nil, errNilView + } + if v.Measure == nil { + return nil, errNilMeasure + } + + name := stringOrCall(v.Name, v.Measure.Name) + if len(metricNamePrefix) > 0 { + name = metricNamePrefix + "/" + name + } + desc := &metricspb.MetricDescriptor{ + Name: name, + Description: stringOrCall(v.Description, v.Measure.Description), + Unit: v.Measure.Unit(), + Type: aggregationToMetricDescriptorType(v), + LabelKeys: tagKeysToLabelKeys(v.TagKeys), + } + return desc, nil +} + +func stringOrCall(first string, 
call func() string) string { + if first != "" { + return first + } + return call() +} + +type measureType uint + +const ( + measureUnknown measureType = iota + measureInt64 + measureFloat64 +) + +func measureTypeFromMeasure(m stats.Measure) measureType { + switch m.(type) { + default: + return measureUnknown + case *stats.Float64Measure: + return measureFloat64 + case *stats.Int64Measure: + return measureInt64 + } +} + +func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { + if v == nil || v.Aggregation == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + if v.Measure == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + + switch v.Aggregation.Type { + case view.AggTypeCount: + // Cumulative on int64 + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + + case view.AggTypeDistribution: + // Cumulative types + return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION + + case view.AggTypeLastValue: + // Gauge types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_GAUGE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_GAUGE_INT64 + } + + case view.AggTypeSum: + // Cumulative types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + } + } + + // For all other cases, return unspecified. 
+ return metricspb.MetricDescriptor_UNSPECIFIED +} + +func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { + labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) + for _, tagKey := range tagKeys { + labelKeys = append(labelKeys, &metricspb.LabelKey{ + Key: tagKey.Name(), + }) + } + return labelKeys +} + +func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { + if vd == nil || len(vd.Rows) == 0 { + return nil, nil + } + + // Given that view.Data only contains Start, End + // the timestamps for all the row data will be the exact same + // per aggregation. However, the values will differ. + // Each row has its own tags. + startTimestamp := timeToProtoTimestamp(vd.Start) + endTimestamp := timeToProtoTimestamp(vd.End) + + mType := measureTypeFromMeasure(vd.View.Measure) + timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) + // It is imperative that the ordering of "LabelValues" matches those + // of the Label keys in the metric descriptor. + for _, row := range vd.Rows { + labelValues := labelValuesFromTags(row.Tags) + point := rowToPoint(vd.View, row, endTimestamp, mType) + timeseries = append(timeseries, &metricspb.TimeSeries{ + StartTimestamp: startTimestamp, + LabelValues: labelValues, + Points: []*metricspb.Point{point}, + }) + } + + if len(timeseries) == 0 { + return nil, nil + } + + return timeseries, nil +} + +func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { + unixNano := t.UnixNano() + return ×tamp.Timestamp{ + Seconds: int64(unixNano / 1e9), + Nanos: int32(unixNano % 1e9), + } +} + +func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { + pt := &metricspb.Point{ + Timestamp: endTimestamp, + } + + switch data := row.Data.(type) { + case *view.CountData: + pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} + + case *view.DistributionData: + pt.Value = &metricspb.Point_DistributionValue{ + DistributionValue: 
&metricspb.DistributionValue{ + Count: data.Count, + Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count + // TODO: Add Exemplar + Buckets: bucketsToProtoBuckets(data.CountPerBucket), + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: v.Aggregation.Buckets, + }, + }, + }, + SumOfSquaredDeviation: data.SumOfSquaredDev, + }} + + case *view.LastValueData: + setPointValue(pt, data.Value, mType) + + case *view.SumData: + setPointValue(pt, data.Value, mType) + } + + return pt +} + +// Not returning anything from this function because metricspb.Point.is_Value is an unexported +// interface hence we just have to set its value by pointer. +func setPointValue(pt *metricspb.Point, value float64, mType measureType) { + if mType == measureInt64 { + pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} + } else { + pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} + } +} + +func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { + distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) + for i := 0; i < len(countPerBucket); i++ { + count := countPerBucket[i] + + distBuckets[i] = &metricspb.DistributionValue_Bucket{ + Count: count, + } + } + + return distBuckets +} + +func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { + if len(tags) == 0 { + return nil + } + + labelValues := make([]*metricspb.LabelValue, 0, len(tags)) + for _, tag_ := range tags { + labelValues = append(labelValues, &metricspb.LabelValue{ + Value: tag_.Value, + + // It is imperative that we set the "HasValue" attribute, + // in order to distinguish missing a label from the empty string. 
+ // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue + // + // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, + // so the best case that we can use to distinguish missing labels/tags from the + // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have + // a value. + HasValue: tag_.Key.Name() != "", + }) + } + return labelValues +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 0000000000..68be4c75bd --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ocagent + +const Version = "0.0.1" diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore new file mode 100644 index 0000000000..85e7c1dfcb --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore @@ -0,0 +1 @@ +/.idea/ diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml new file mode 100644 index 0000000000..0aa9844f42 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml @@ -0,0 +1,123 @@ +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 10m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: true + + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + skip-dirs: + + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: false + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + skip-files: + + # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. 
This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + modules-download-mode: readonly + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + +# all available settings of specific linters +linters-settings: + govet: + # report about shadowed variables + check-shadowing: true + + # settings per analyzer + settings: + printf: # analyzer name, run `go tool vet help` to see all analyzers + funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + + enable-all: true + # TODO: Enable this and fix the alignment issues. + disable: + - fieldalignment + + golint: + # minimal confidence for issues, default is 0.8 + min-confidence: 0.8 + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: contrib.go.opencensus.io/exporter/prometheus + + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. 
+ locale: US + ignore-words: + - cancelled + - metre + - meter + - metres + - kilometre + - kilometres + +linters: + disable: + - errcheck + enable: + - gofmt + - goimports + - golint + - gosec + - govet + - staticcheck + - misspell + - scopelint + - unconvert + - gocritic + - unparam + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test\.go + linters: + - scopelint + - text: "G404:" + linters: + - gosec diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml new file mode 100644 index 0000000000..17afafec2b --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go_import_path: contrib.go.opencensus.io + +go: + - 1.15.x + +env: + global: + GO111MODULE=on + +before_script: + - make install-tools + +script: + - make travis-ci + diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE b/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile new file mode 100644 index 0000000000..cf4d613281 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile @@ -0,0 +1,50 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +LINT=golangci-lint +# TODO decide if we need to change these names. +README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') + +.DEFAULT_GOAL := lint-test + +.PHONY: lint-test +lint-test: lint test + +# TODO enable test-with-coverage in travis +.PHONY: travis-ci +travis-ci: lint test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: lint +lint: + $(LINT) run --allow-parallel-runners + +.PHONY: install-tools +install-tools: + cd internal/tools && go install golang.org/x/tools/cmd/cover + cd internal/tools && go install github.com/golangci/golangci-lint/cmd/golangci-lint + diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md new file mode 100644 index 0000000000..3a9c5d3c8e --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md @@ -0,0 +1,14 @@ +# OpenCensus Go Prometheus Exporter + +[![Build Status](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus.svg?branch=master)](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus) [![GoDoc][godoc-image]][godoc-url] + +Provides OpenCensus metrics export support for Prometheus. 
+ +## Installation + +``` +$ go get -u contrib.go.opencensus.io/exporter/prometheus +``` + +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go new file mode 100644 index 0000000000..b94c6d3991 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go @@ -0,0 +1,303 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus contains a Prometheus exporter that supports exporting +// OpenCensus views as Prometheus metrics. +package prometheus // import "contrib.go.opencensus.io/exporter/prometheus" + +import ( + "context" + "fmt" + "log" + "net/http" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats/view" +) + +// Exporter exports stats to Prometheus, users need +// to register the exporter as an http.Handler to be +// able to export. +type Exporter struct { + opts Options + g prometheus.Gatherer + c *collector + handler http.Handler +} + +// Options contains options for configuring the exporter. 
+type Options struct { + Namespace string + Registry *prometheus.Registry + Registerer prometheus.Registerer + Gatherer prometheus.Gatherer + OnError func(err error) + ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. +} + +// NewExporter returns an exporter that exports stats to Prometheus. +func NewExporter(o Options) (*Exporter, error) { + if o.Registry == nil { + o.Registry = prometheus.NewRegistry() + } + if o.Registerer == nil { + o.Registerer = o.Registry + } + if o.Gatherer == nil { + o.Gatherer = o.Registry + } + + collector := newCollector(o, o.Registerer) + e := &Exporter{ + opts: o, + g: o.Gatherer, + c: collector, + handler: promhttp.HandlerFor(o.Gatherer, promhttp.HandlerOpts{}), + } + collector.ensureRegisteredOnce() + + return e, nil +} + +var _ http.Handler = (*Exporter)(nil) + +// ensureRegisteredOnce invokes reg.Register on the collector itself +// exactly once to ensure that we don't get errors such as +// cannot register the collector: descriptor Desc{fqName: *} +// already exists with the same fully-qualified name and const label values +// which is documented by Prometheus at +// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101 +func (c *collector) ensureRegisteredOnce() { + c.registerOnce.Do(func() { + if err := c.reg.Register(c); err != nil { + c.opts.onError(fmt.Errorf("cannot register the collector: %v", err)) + } + }) + +} + +func (o *Options) onError(err error) { + if o.OnError != nil { + o.OnError(err) + } else { + log.Printf("Failed to export to Prometheus: %v", err) + } +} + +// ExportView exports to the Prometheus if view data has one or more rows. +// Each OpenCensus AggregationData will be converted to +// corresponding Prometheus Metric: SumData will be converted +// to Untyped Metric, CountData will be a Counter Metric, +// DistributionData will be a Histogram Metric. 
+// +// Deprecated: in lieu of metricexport.Reader interface. +func (e *Exporter) ExportView(vd *view.Data) { +} + +// ServeHTTP serves the Prometheus endpoint. +func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// collector implements prometheus.Collector +type collector struct { + opts Options + + registerOnce sync.Once + + // reg helps collector register views dynamically. + reg prometheus.Registerer + + // reader reads metrics from all registered producers. + reader *metricexport.Reader +} + +func (c *collector) Describe(ch chan<- *prometheus.Desc) { + de := &descExporter{c: c, descCh: ch} + c.reader.ReadAndExport(de) +} + +// Collect fetches the statistics from OpenCensus +// and delivers them as Prometheus Metrics. +// Collect is invoked every time a prometheus.Gatherer is run +// for example when the HTTP endpoint is invoked by Prometheus. +func (c *collector) Collect(ch chan<- prometheus.Metric) { + me := &metricExporter{c: c, metricCh: ch} + c.reader.ReadAndExport(me) +} + +func newCollector(opts Options, registrar prometheus.Registerer) *collector { + return &collector{ + reg: registrar, + opts: opts, + reader: metricexport.NewReader()} +} + +func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { + var labels prometheus.Labels + switch { + case metric.Resource == nil: + labels = c.opts.ConstLabels + case c.opts.ConstLabels == nil: + labels = metric.Resource.Labels + default: + labels = prometheus.Labels{} + for k, v := range c.opts.ConstLabels { + labels[k] = v + } + // Resource labels overwrite const labels. + for k, v := range metric.Resource.Labels { + labels[k] = v + } + } + + return prometheus.NewDesc( + metricName(c.opts.Namespace, metric), + metric.Descriptor.Description, + toPromLabels(metric.Descriptor.LabelKeys), + labels) +} + +type metricExporter struct { + c *collector + metricCh chan<- prometheus.Metric +} + +// ExportMetrics exports to the Prometheus. 
+// Each OpenCensus Metric will be converted to +// corresponding Prometheus Metric: +// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric, +// TypeCumulativeDistribution will be a Histogram Metric. +// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric +func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + for _, ts := range metric.TimeSeries { + tvs := toLabelValues(ts.LabelValues) + for _, point := range ts.Points { + metric, err := toPromMetric(desc, metric, point, tvs) + if err != nil { + me.c.opts.onError(err) + } else if metric != nil { + me.metricCh <- metric + } + } + } + } + return nil +} + +type descExporter struct { + c *collector + descCh chan<- *prometheus.Desc +} + +// ExportMetrics exports descriptor to the Prometheus. +// It is invoked when request to scrape descriptors is received. +func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + me.descCh <- desc + } + return nil +} + +func toPromLabels(mls []metricdata.LabelKey) (labels []string) { + for _, ml := range mls { + labels = append(labels, sanitize(ml.Key)) + } + return labels +} + +func metricName(namespace string, m *metricdata.Metric) string { + var name string + if namespace != "" { + name = namespace + "_" + } + return name + sanitize(m.Descriptor.Name) +} + +func toPromMetric( + desc *prometheus.Desc, + metric *metricdata.Metric, + point metricdata.Point, + labelValues []string) (prometheus.Metric, error) { + switch metric.Descriptor.Type { + case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...) 
+ + case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...) + + case metricdata.TypeCumulativeDistribution: + switch v := point.Value.(type) { + case *metricdata.Distribution: + points := make(map[float64]uint64) + // Histograms are cumulative in Prometheus. + // Get cumulative bucket counts. + cumCount := uint64(0) + for i, b := range v.BucketOptions.Bounds { + cumCount += uint64(v.Buckets[i].Count) + points[b] = cumCount + } + return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...) + default: + return nil, typeMismatchError(point) + } + case metricdata.TypeSummary: + // TODO: [rghetia] add support for TypeSummary. + return nil, nil + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type) + } +} + +func toLabelValues(labelValues []metricdata.LabelValue) (values []string) { + for _, lv := range labelValues { + if lv.Present { + values = append(values, lv.Value) + } else { + values = append(values, "") + } + } + return values +} + +func typeMismatchError(point metricdata.Point) error { + return fmt.Errorf("point type %T does not match metric type", point) + +} + +func toPromValue(point metricdata.Point) (float64, error) { + switch v := point.Value.(type) { + case float64: + return v, nil + case int64: + return float64(v), nil + default: + return 0.0, typeMismatchError(point) + } +} diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go new file mode 100644 index 0000000000..9c9a9c4dd7 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go @@ -0,0 +1,38 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with 
the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +const labelKeySizeLimit = 100 + +// sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + + s = mapper.EscapeMetricName(s) + if s[0] == '_' { + s = "key" + s + } + return s +} diff --git a/vendor/cuelang.org/go/AUTHORS b/vendor/cuelang.org/go/AUTHORS new file mode 100644 index 0000000000..884392fca0 --- /dev/null +++ b/vendor/cuelang.org/go/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of CUE authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google LLC diff --git a/vendor/cuelang.org/go/LICENSE b/vendor/cuelang.org/go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/cuelang.org/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go new file mode 100644 index 0000000000..6018808abd --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/ast.go @@ -0,0 +1,1057 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package ast declares the types used to represent syntax trees for CUE +// packages. +package ast // import "cuelang.org/go/cue/ast" + +import ( + "fmt" + "strings" + + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +// ---------------------------------------------------------------------------- +// Interfaces +// +// There are three main classes of nodes: expressions, clauses, and declaration +// nodes. The node names usually match the corresponding CUE spec production +// names to which they correspond. The node fields correspond to the individual +// parts of the respective productions. +// +// All nodes contain position information marking the beginning of the +// corresponding source text segment; it is accessible via the Pos accessor +// method. Nodes may contain additional position info for language constructs +// where comments may be found between parts of the construct (typically any +// larger, parenthesized subpart). That position information is needed to +// properly position comments when printing the construct. + +// A Node represents any node in the abstract syntax tree. +type Node interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node + + // pos reports the pointer to the position of first character belonging to + // the node or nil if there is no such position. + pos() *token.Pos + + // Deprecated: use ast.Comments + Comments() []*CommentGroup + + // Deprecated: use ast.AddComment + AddComment(*CommentGroup) + commentInfo() *comments +} + +// Name describes the type of n. +func Name(n Node) string { + s := fmt.Sprintf("%T", n) + return strings.ToLower(s[strings.Index(s, "ast.")+4:]) +} + +func getPos(n Node) token.Pos { + p := n.pos() + if p == nil { + return token.NoPos + } + return *p +} + +// SetPos sets a node to the given position, if possible. 
+func SetPos(n Node, p token.Pos) { + ptr := n.pos() + if ptr == nil { + return + } + *ptr = p +} + +// SetRelPos sets the relative position of a node without modifying its +// file position. Setting it to token.NoRelPos allows a node to adopt default +// formatting. +func SetRelPos(n Node, p token.RelPos) { + ptr := n.pos() + if ptr == nil { + return + } + pos := *ptr + *ptr = pos.WithRel(p) +} + +// An Expr is implemented by all expression nodes. +type Expr interface { + Node + declNode() // An expression can be used as a declaration. + exprNode() +} + +type expr struct{ decl } + +func (expr) exprNode() {} + +// A Decl node is implemented by all declarations. +type Decl interface { + Node + declNode() +} + +type decl struct{} + +func (decl) declNode() {} + +// A Label is any production that can be used as a LHS label. +type Label interface { + Node + labelNode() +} + +type label struct{} + +func (l label) labelNode() {} + +// Clause nodes are part of comprehensions. +type Clause interface { + Node + clauseNode() +} + +type clause struct{} + +func (clause) clauseNode() {} + +func (x *ForClause) clauseNode() {} +func (x *IfClause) clauseNode() {} +func (x *Alias) clauseNode() {} + +// Comments + +type comments struct { + groups *[]*CommentGroup +} + +func (c *comments) commentInfo() *comments { return c } + +func (c *comments) Comments() []*CommentGroup { + if c.groups == nil { + return []*CommentGroup{} + } + return *c.groups +} + +// // AddComment adds the given comments to the fields. +// // If line is true the comment is inserted at the preceding token. 
+ +func (c *comments) AddComment(cg *CommentGroup) { + if cg == nil { + return + } + if c.groups == nil { + a := []*CommentGroup{cg} + c.groups = &a + return + } + + *c.groups = append(*c.groups, cg) + a := *c.groups + for i := len(a) - 2; i >= 0 && a[i].Position > cg.Position; i-- { + a[i], a[i+1] = a[i+1], a[i] + } +} + +func (c *comments) SetComments(cgs []*CommentGroup) { + if c.groups == nil { + a := cgs + c.groups = &a + return + } + *c.groups = cgs +} + +// A Comment node represents a single //-style or /*-style comment. +type Comment struct { + Slash token.Pos // position of "/" starting the comment + Text string // comment text (excluding '\n' for //-style comments) +} + +func (c *Comment) Comments() []*CommentGroup { return nil } +func (c *Comment) AddComment(*CommentGroup) {} +func (c *Comment) commentInfo() *comments { return nil } + +func (c *Comment) Pos() token.Pos { return c.Slash } +func (c *Comment) pos() *token.Pos { return &c.Slash } +func (c *Comment) End() token.Pos { return c.Slash.Add(len(c.Text)) } + +// A CommentGroup represents a sequence of comments +// with no other tokens and no empty lines between. +type CommentGroup struct { + // TODO: remove and use the token position of the first comment. + Doc bool + Line bool // true if it is on the same line as the node's end pos. + + // Position indicates where a comment should be attached if a node has + // multiple tokens. 0 means before the first token, 1 means before the + // second, etc. 
For instance, for a field, the positions are: + // <0> Label <1> ":" <2> Expr <3> "," <4> + Position int8 + List []*Comment // len(List) > 0 + + decl +} + +func (g *CommentGroup) Pos() token.Pos { return getPos(g) } +func (g *CommentGroup) pos() *token.Pos { return g.List[0].pos() } +func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() } + +func (g *CommentGroup) Comments() []*CommentGroup { return nil } +func (g *CommentGroup) AddComment(*CommentGroup) {} +func (g *CommentGroup) commentInfo() *comments { return nil } + +func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } + +func stripTrailingWhitespace(s string) string { + i := len(s) + for i > 0 && isWhitespace(s[i-1]) { + i-- + } + return s[0:i] +} + +// Text returns the text of the comment. +// Comment markers (//, /*, and */), the first space of a line comment, and +// leading and trailing empty lines are removed. Multiple empty lines are +// reduced to one, and trailing space on lines is trimmed. Unless the result +// is empty, it is newline-terminated. +func (g *CommentGroup) Text() string { + if g == nil { + return "" + } + comments := make([]string, len(g.List)) + for i, c := range g.List { + comments[i] = c.Text + } + + lines := make([]string, 0, 10) // most comments are less than 10 lines + for _, c := range comments { + // Remove comment markers. + // The parser has given us exactly the comment text. + switch c[1] { + case '/': + //-style comment (no newline at the end) + c = c[2:] + // strip first space - required for Example tests + if len(c) > 0 && c[0] == ' ' { + c = c[1:] + } + case '*': + /*-style comment */ + c = c[2 : len(c)-2] + } + + // Split on newlines. + cl := strings.Split(c, "\n") + + // Walk lines, stripping trailing white space and adding to list. 
+ for _, l := range cl { + lines = append(lines, stripTrailingWhitespace(l)) + } + } + + // Remove leading blank lines; convert runs of + // interior blank lines to a single blank line. + n := 0 + for _, line := range lines { + if line != "" || n > 0 && lines[n-1] != "" { + lines[n] = line + n++ + } + } + lines = lines[0:n] + + // Add final "" entry to get trailing newline from Join. + if n > 0 && lines[n-1] != "" { + lines = append(lines, "") + } + + return strings.Join(lines, "\n") +} + +// An Attribute provides meta data about a field. +type Attribute struct { + At token.Pos + Text string // must be a valid attribute format. + + comments + decl +} + +func (a *Attribute) Pos() token.Pos { return a.At } +func (a *Attribute) pos() *token.Pos { return &a.At } +func (a *Attribute) End() token.Pos { return a.At.Add(len(a.Text)) } + +func (a *Attribute) Split() (key, body string) { + s := a.Text + p := strings.IndexByte(s, '(') + if p < 0 || !strings.HasPrefix(s, "@") || !strings.HasSuffix(s, ")") { + return "", "" + } + return a.Text[1:p], a.Text[p+1 : len(s)-1] +} + +// A Field represents a field declaration in a struct. +type Field struct { + Label Label // must have at least one element. + Optional token.Pos + + // No TokenPos: Value must be an StructLit with one field. + TokenPos token.Pos + Token token.Token // ':' or '::', ILLEGAL implies ':' + + Value Expr // the value associated with this field. + + Attrs []*Attribute + + comments + decl +} + +func (d *Field) Pos() token.Pos { return d.Label.Pos() } +func (d *Field) pos() *token.Pos { return d.Label.pos() } +func (d *Field) End() token.Pos { + if len(d.Attrs) > 0 { + return d.Attrs[len(d.Attrs)-1].End() + } + return d.Value.End() +} + +// TODO: make Alias a type of Field. This is possible now we have different +// separator types. + +// An Alias binds another field to the alias name in the current struct. 
+type Alias struct { + Ident *Ident // field name, always an Ident + Equal token.Pos // position of "=" + Expr Expr // An Ident or SelectorExpr + + comments + decl + expr + label +} + +func (a *Alias) Pos() token.Pos { return a.Ident.Pos() } +func (a *Alias) pos() *token.Pos { return a.Ident.pos() } +func (a *Alias) End() token.Pos { return a.Expr.End() } + +// A Comprehension node represents a comprehension declaration. +type Comprehension struct { + Clauses []Clause // There must be at least one clause. + Value Expr // Must be a struct TODO: change to Struct + + comments + decl + expr // TODO: only allow Comprehension in "Embedding" productions. +} + +func (x *Comprehension) Pos() token.Pos { return getPos(x) } +func (x *Comprehension) pos() *token.Pos { return x.Clauses[0].pos() } +func (x *Comprehension) End() token.Pos { + return x.Value.End() +} + +// ---------------------------------------------------------------------------- +// Expressions and types +// +// An expression is represented by a tree consisting of one +// or more of the following concrete expression nodes. + +// A BadExpr node is a placeholder for expressions containing +// syntax errors for which no correct expression nodes can be +// created. This is different from an ErrorExpr which represents +// an explicitly marked error in the source. +type BadExpr struct { + From, To token.Pos // position range of bad expression + + comments + expr +} + +// A BottomLit indicates an error. +type BottomLit struct { + Bottom token.Pos + + comments + expr +} + +// An Ident node represents an left-hand side identifier. +type Ident struct { + NamePos token.Pos // identifier position + + // This LHS path element may be an identifier. Possible forms: + // foo: a normal identifier + // "foo": JSON compatible + Name string + + Scope Node // scope in which node was found or nil if referring directly + Node Node + + comments + label + expr +} + +// A BasicLit node represents a literal of basic type. 
+type BasicLit struct { + ValuePos token.Pos // literal position + Kind token.Token // INT, FLOAT, DURATION, or STRING + Value string // literal string; e.g. 42, 0x7f, 3.14, 1_234_567, 1e-9, 2.4i, 'a', '\x7f', "foo", or '\m\n\o' + + comments + expr + label +} + +// TODO: introduce and use NewLabel and NewBytes and perhaps NewText (in the +// later case NewString would return a string or bytes type) to distinguish from +// NewString. Consider how to pass indentation information. + +// NewString creates a new BasicLit with a string value without position. +// It quotes the given string. +// Useful for ASTs generated by code other than the CUE parser. +func NewString(str string) *BasicLit { + str = literal.String.Quote(str) + return &BasicLit{Kind: token.STRING, ValuePos: token.NoPos, Value: str} +} + +// NewNull creates a new BasicLit configured to be a null value. +// Useful for ASTs generated by code other than the CUE parser. +func NewNull() *BasicLit { + return &BasicLit{Kind: token.NULL, Value: "null"} +} + +// NewLit creates a new BasicLit with from a token type and string without +// position. +// Useful for ASTs generated by code other than the CUE parser. +func NewLit(tok token.Token, s string) *BasicLit { + return &BasicLit{Kind: tok, Value: s} +} + +// NewBool creates a new BasicLit with a bool value without position. +// Useful for ASTs generated by code other than the CUE parser. +func NewBool(b bool) *BasicLit { + x := &BasicLit{} + if b { + x.Kind = token.TRUE + x.Value = "true" + } else { + x.Kind = token.FALSE + x.Value = "false" + } + return x +} + +// TODO: +// - use CUE-specific quoting (hoist functionality in export) +// - NewBytes + +// A Interpolation node represents a string or bytes interpolation. +type Interpolation struct { + Elts []Expr // interleaving of strings and expressions. + + comments + expr + label +} + +// A StructLit node represents a literal struct. 
+type StructLit struct { + Lbrace token.Pos // position of "{" + Elts []Decl // list of elements; or nil + Rbrace token.Pos // position of "}" + + comments + expr +} + +// NewStruct creates a struct from the given fields. +// +// A field is either a *Field, an *Elipsis, *LetClause, a *CommentGroup, or a +// Label, optionally followed by a a token.OPTION to indicate the field is +// optional, optionally followed by a token.ISA to indicate the field is a +// definition followed by an expression for the field value. +// +// It will panic if a values not matching these patterns are given. Useful for +// ASTs generated by code other than the CUE parser. +func NewStruct(fields ...interface{}) *StructLit { + s := &StructLit{ + // Set default positions so that comment attachment is as expected. + Lbrace: token.NoSpace.Pos(), + } + for i := 0; i < len(fields); i++ { + var ( + label Label + optional = token.NoPos + tok = token.ILLEGAL + expr Expr + ) + + switch x := fields[i].(type) { + case *Field: + s.Elts = append(s.Elts, x) + continue + case *CommentGroup: + s.Elts = append(s.Elts, x) + continue + case *Ellipsis: + s.Elts = append(s.Elts, x) + continue + case *LetClause: + s.Elts = append(s.Elts, x) + continue + case *embedding: + s.Elts = append(s.Elts, (*EmbedDecl)(x)) + continue + case Label: + label = x + case string: + label = NewString(x) + default: + panic(fmt.Sprintf("unsupported label type %T", x)) + } + + inner: + for i++; i < len(fields); i++ { + switch x := (fields[i]).(type) { + case Expr: + expr = x + break inner + case token.Token: + switch x { + case token.ISA: + tok = x + case token.OPTION: + optional = token.Blank.Pos() + case token.COLON, token.ILLEGAL: + default: + panic(fmt.Sprintf("invalid token %s", x)) + } + default: + panic(fmt.Sprintf("unsupported expression type %T", x)) + } + } + if expr == nil { + panic("label not matched with expression") + } + s.Elts = append(s.Elts, &Field{ + Label: label, + Optional: optional, + Token: tok, + Value: expr, 
+ }) + } + return s +} + +// Embed can be used in conjunction with NewStruct to embed values. +func Embed(x Expr) *embedding { + return (*embedding)(&EmbedDecl{Expr: x}) +} + +type embedding EmbedDecl + +// A ListLit node represents a literal list. +type ListLit struct { + Lbrack token.Pos // position of "[" + + // TODO: change to embedding or similar. + Elts []Expr // list of composite elements; or nil + Rbrack token.Pos // position of "]" + + comments + expr + label +} + +// NewList creates a list of Expressions. +// Useful for ASTs generated by code other than the CUE parser. +func NewList(exprs ...Expr) *ListLit { + return &ListLit{Elts: exprs} +} + +type Ellipsis struct { + Ellipsis token.Pos // open list if set + Type Expr // type for the remaining elements + + comments + decl + expr +} + +// A ForClause node represents a for clause in a comprehension. +type ForClause struct { + For token.Pos + Key *Ident // allow pattern matching? + // TODO: change to Comma + Colon token.Pos + Value *Ident // allow pattern matching? + In token.Pos + Source Expr + + comments + clause +} + +// A IfClause node represents an if guard clause in a comprehension. +type IfClause struct { + If token.Pos + Condition Expr + + comments + clause +} + +// A LetClause node represents a let clause in a comprehension. +type LetClause struct { + Let token.Pos + Ident *Ident + Equal token.Pos + Expr Expr + + comments + clause + decl +} + +// A ParenExpr node represents a parenthesized expression. +type ParenExpr struct { + Lparen token.Pos // position of "(" + X Expr // parenthesized expression + Rparen token.Pos // position of ")" + + comments + expr + label +} + +// A SelectorExpr node represents an expression followed by a selector. +type SelectorExpr struct { + X Expr // expression + Sel Label // field selector + + comments + expr +} + +// NewSel creates a sequence of selectors. +// Useful for ASTs generated by code other than the CUE parser. 
+func NewSel(x Expr, sel ...string) Expr { + for _, s := range sel { + x = &SelectorExpr{X: x, Sel: NewIdent(s)} + } + return x +} + +// An IndexExpr node represents an expression followed by an index. +type IndexExpr struct { + X Expr // expression + Lbrack token.Pos // position of "[" + Index Expr // index expression + Rbrack token.Pos // position of "]" + + comments + expr +} + +// An SliceExpr node represents an expression followed by slice indices. +type SliceExpr struct { + X Expr // expression + Lbrack token.Pos // position of "[" + Low Expr // begin of slice range; or nil + High Expr // end of slice range; or nil + Rbrack token.Pos // position of "]" + + comments + expr +} + +// A CallExpr node represents an expression followed by an argument list. +type CallExpr struct { + Fun Expr // function expression + Lparen token.Pos // position of "(" + Args []Expr // function arguments; or nil + Rparen token.Pos // position of ")" + + comments + expr +} + +// NewCall creates a new CallExpr. +// Useful for ASTs generated by code other than the CUE parser. +func NewCall(fun Expr, args ...Expr) *CallExpr { + return &CallExpr{Fun: fun, Args: args} +} + +// A UnaryExpr node represents a unary expression. +type UnaryExpr struct { + OpPos token.Pos // position of Op + Op token.Token // operator + X Expr // operand + + comments + expr +} + +// A BinaryExpr node represents a binary expression. +type BinaryExpr struct { + X Expr // left operand + OpPos token.Pos // position of Op + Op token.Token // operator + Y Expr // right operand + + comments + expr +} + +// NewBinExpr creates for list of expressions of length 2 or greater a chained +// binary expression of the form (((x1 op x2) op x3) ...). For lists of length +// 1 it returns the expression itself. It panics for empty lists. +// Useful for ASTs generated by code other than the CUE parser. 
+func NewBinExpr(op token.Token, operands ...Expr) Expr { + if len(operands) == 0 { + return nil + } + expr := operands[0] + for _, e := range operands[1:] { + expr = &BinaryExpr{X: expr, Op: op, Y: e} + } + return expr +} + +// token.Pos and End implementations for expression/type nodes. + +func (x *BadExpr) Pos() token.Pos { return x.From } +func (x *BadExpr) pos() *token.Pos { return &x.From } +func (x *Ident) Pos() token.Pos { return x.NamePos } +func (x *Ident) pos() *token.Pos { return &x.NamePos } +func (x *BasicLit) Pos() token.Pos { return x.ValuePos } +func (x *BasicLit) pos() *token.Pos { return &x.ValuePos } +func (x *Interpolation) Pos() token.Pos { return x.Elts[0].Pos() } +func (x *Interpolation) pos() *token.Pos { return x.Elts[0].pos() } +func (x *StructLit) Pos() token.Pos { return getPos(x) } +func (x *StructLit) pos() *token.Pos { + if x.Lbrace == token.NoPos && len(x.Elts) > 0 { + return x.Elts[0].pos() + } + return &x.Lbrace +} + +func (x *ListLit) Pos() token.Pos { return x.Lbrack } +func (x *ListLit) pos() *token.Pos { return &x.Lbrack } +func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis } +func (x *Ellipsis) pos() *token.Pos { return &x.Ellipsis } +func (x *LetClause) Pos() token.Pos { return x.Let } +func (x *LetClause) pos() *token.Pos { return &x.Let } +func (x *ForClause) Pos() token.Pos { return x.For } +func (x *ForClause) pos() *token.Pos { return &x.For } +func (x *IfClause) Pos() token.Pos { return x.If } +func (x *IfClause) pos() *token.Pos { return &x.If } +func (x *ParenExpr) Pos() token.Pos { return x.Lparen } +func (x *ParenExpr) pos() *token.Pos { return &x.Lparen } +func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SelectorExpr) pos() *token.Pos { return x.X.pos() } +func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() } +func (x *IndexExpr) pos() *token.Pos { return x.X.pos() } +func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SliceExpr) pos() *token.Pos { return x.X.pos() } 
+func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() } +func (x *CallExpr) pos() *token.Pos { return x.Fun.pos() } +func (x *UnaryExpr) Pos() token.Pos { return x.OpPos } +func (x *UnaryExpr) pos() *token.Pos { return &x.OpPos } +func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() } +func (x *BinaryExpr) pos() *token.Pos { return x.X.pos() } +func (x *BottomLit) Pos() token.Pos { return x.Bottom } +func (x *BottomLit) pos() *token.Pos { return &x.Bottom } + +func (x *BadExpr) End() token.Pos { return x.To } +func (x *Ident) End() token.Pos { + return x.NamePos.Add(len(x.Name)) +} +func (x *BasicLit) End() token.Pos { return x.ValuePos.Add(len(x.Value)) } + +func (x *Interpolation) End() token.Pos { return x.Elts[len(x.Elts)-1].Pos() } +func (x *StructLit) End() token.Pos { + if x.Rbrace == token.NoPos && len(x.Elts) > 0 { + return x.Elts[len(x.Elts)-1].Pos() + } + return x.Rbrace.Add(1) +} +func (x *ListLit) End() token.Pos { return x.Rbrack.Add(1) } +func (x *Ellipsis) End() token.Pos { + if x.Type != nil { + return x.Type.End() + } + return x.Ellipsis.Add(3) // len("...") +} +func (x *LetClause) End() token.Pos { return x.Expr.End() } +func (x *ForClause) End() token.Pos { return x.Source.End() } +func (x *IfClause) End() token.Pos { return x.Condition.End() } +func (x *ParenExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *SelectorExpr) End() token.Pos { return x.Sel.End() } +func (x *IndexExpr) End() token.Pos { return x.Rbrack.Add(1) } +func (x *SliceExpr) End() token.Pos { return x.Rbrack.Add(1) } +func (x *CallExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *UnaryExpr) End() token.Pos { return x.X.End() } +func (x *BinaryExpr) End() token.Pos { return x.Y.End() } +func (x *BottomLit) End() token.Pos { return x.Bottom.Add(1) } + +// ---------------------------------------------------------------------------- +// Convenience functions for Idents + +// NewIdent creates a new Ident without position. 
+// Useful for ASTs generated by code other than the CUE parser. +func NewIdent(name string) *Ident { + return &Ident{token.NoPos, name, nil, nil, comments{}, label{}, expr{}} +} + +func (id *Ident) String() string { + if id != nil { + return id.Name + } + return "" +} + +// ---------------------------------------------------------------------------- +// Declarations + +// An ImportSpec node represents a single package import. +type ImportSpec struct { + Name *Ident // local package name (including "."); or nil + Path *BasicLit // import path + EndPos token.Pos // end of spec (overrides Path.Pos if nonzero) + + comments +} + +func (*ImportSpec) specNode() {} + +func NewImport(name *Ident, importPath string) *ImportSpec { + importPath = literal.String.Quote(importPath) + path := &BasicLit{Kind: token.STRING, Value: importPath} + return &ImportSpec{Name: name, Path: path} +} + +// Pos and End implementations for spec nodes. + +func (s *ImportSpec) Pos() token.Pos { return getPos(s) } +func (s *ImportSpec) pos() *token.Pos { + if s.Name != nil { + return s.Name.pos() + } + return s.Path.pos() +} + +// func (s *AliasSpec) Pos() token.Pos { return s.Name.Pos() } +// func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() } +// func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() } + +func (s *ImportSpec) End() token.Pos { + if s.EndPos != token.NoPos { + return s.EndPos + } + return s.Path.End() +} + +// A BadDecl node is a placeholder for declarations containing +// syntax errors for which no correct declaration nodes can be +// created. +type BadDecl struct { + From, To token.Pos // position range of bad declaration + + comments + decl +} + +// A ImportDecl node represents a series of import declarations. A valid +// Lparen position (Lparen.Line > 0) indicates a parenthesized declaration. 
+type ImportDecl struct { + Import token.Pos + Lparen token.Pos // position of '(', if any + Specs []*ImportSpec + Rparen token.Pos // position of ')', if any + + comments + decl +} + +type Spec interface { + Node + specNode() +} + +// An EmbedDecl node represents a single expression used as a declaration. +// The expressions in this declaration is what will be emitted as +// configuration output. +// +// An EmbedDecl may only appear at the top level. +type EmbedDecl struct { + Expr Expr + + comments + decl +} + +// Pos and End implementations for declaration nodes. + +func (d *BadDecl) Pos() token.Pos { return d.From } +func (d *BadDecl) pos() *token.Pos { return &d.From } +func (d *ImportDecl) Pos() token.Pos { return d.Import } +func (d *ImportDecl) pos() *token.Pos { return &d.Import } +func (d *EmbedDecl) Pos() token.Pos { return d.Expr.Pos() } +func (d *EmbedDecl) pos() *token.Pos { return d.Expr.pos() } + +func (d *BadDecl) End() token.Pos { return d.To } +func (d *ImportDecl) End() token.Pos { + if d.Rparen.IsValid() { + return d.Rparen.Add(1) + } + if len(d.Specs) == 0 { + return token.NoPos + } + return d.Specs[0].End() +} +func (d *EmbedDecl) End() token.Pos { return d.Expr.End() } + +// ---------------------------------------------------------------------------- +// Files and packages + +// A File node represents a Go source file. +// +// The Comments list contains all comments in the source file in order of +// appearance, including the comments that are pointed to from other nodes +// via Doc and Comment fields. +type File struct { + Filename string + Decls []Decl // top-level declarations; or nil + + Imports []*ImportSpec // imports in this file + Unresolved []*Ident // unresolved identifiers in this file + + comments +} + +// Preamble returns the declarations of the preamble. 
+func (f *File) Preamble() []Decl { + p := 0 +outer: + for i, d := range f.Decls { + switch d.(type) { + default: + break outer + + case *Package: + p = i + 1 + case *CommentGroup: + case *Attribute: + case *ImportDecl: + p = i + 1 + } + } + return f.Decls[:p] +} + +func (f *File) VisitImports(fn func(d *ImportDecl)) { + for _, d := range f.Decls { + switch x := d.(type) { + case *CommentGroup: + case *Package: + case *Attribute: + case *ImportDecl: + fn(x) + default: + return + } + } +} + +// PackageName returns the package name associated with this file or "" if no +// package is associated. +func (f *File) PackageName() string { + for _, d := range f.Decls { + switch x := d.(type) { + case *Package: + return x.Name.Name + case *CommentGroup, *Attribute: + default: + return "" + } + } + return "" +} + +func (f *File) Pos() token.Pos { + if len(f.Decls) > 0 { + return f.Decls[0].Pos() + } + if f.Filename != "" { + // TODO. Do something more principled and efficient. + return token.NewFile(f.Filename, -1, 1).Pos(0, 0) + } + return token.NoPos +} + +func (f *File) pos() *token.Pos { + if len(f.Decls) > 0 { + return f.Decls[0].pos() + } + if f.Filename != "" { + return nil + } + return nil +} + +func (f *File) End() token.Pos { + if n := len(f.Decls); n > 0 { + return f.Decls[n-1].End() + } + return token.NoPos +} + +// A Package represents a package clause. 
+type Package struct { + PackagePos token.Pos // position of "package" pseudo-keyword + Name *Ident // package name + + comments + decl +} + +func (p *Package) Pos() token.Pos { return getPos(p) } +func (p *Package) pos() *token.Pos { + if p.PackagePos != token.NoPos { + return &p.PackagePos + } + if p.Name != nil { + return p.Name.pos() + } + return nil +} + +func (p *Package) End() token.Pos { + if p.Name != nil { + return p.Name.End() + } + return token.NoPos +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/apply.go b/vendor/cuelang.org/go/cue/ast/astutil/apply.go new file mode 100644 index 0000000000..22d12c60f3 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/apply.go @@ -0,0 +1,512 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "encoding/hex" + "fmt" + "hash/fnv" + "reflect" + + "cuelang.org/go/cue/ast" +) + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, and Index methods. +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +// Delete, InsertBefore, and InsertAfter are only defined for modifying +// a StructLit and will panic in any other context. +type Cursor interface { + // Node returns the current Node. + Node() ast.Node + + // Parent returns the parent of the current Node. 
+ Parent() Cursor + + // Index reports the index >= 0 of the current Node in the slice of Nodes + // that contains it, or a value < 0 if the current Node is not part of a + // list. + Index() int + + // Import reports an opaque identifier that refers to the given package. It + // may only be called if the input to apply was an ast.File. If the import + // does not exist, it will be added. + Import(path string) *ast.Ident + + // Replace replaces the current Node with n. + // The replacement node is not walked by Apply. Comments of the old node + // are copied to the new node if it has not yet an comments associated + // with it. + Replace(n ast.Node) + + // Delete deletes the current Node from its containing struct. + // If the current Node is not part of a struct, Delete panics. + Delete() + + // InsertAfter inserts n after the current Node in its containing struct. + // If the current Node is not part of a struct, InsertAfter panics. + // Unless n is wrapped by ApplyRecursively, Apply does not walk n. + InsertAfter(n ast.Node) + + // InsertBefore inserts n before the current Node in its containing struct. + // If the current Node is not part of a struct, InsertBefore panics. + // Unless n is wrapped by ApplyRecursively, Apply does not walk n. + InsertBefore(n ast.Node) + + self() *cursor +} + +// ApplyRecursively indicates that a node inserted with InsertBefore, +// or InsertAfter should be processed recursively. +func ApplyRecursively(n ast.Node) ast.Node { + return recursive{n} +} + +type recursive struct { + ast.Node +} + +type info struct { + f *ast.File + current *declsCursor + + importPatch []*ast.Ident +} + +type cursor struct { + file *info + parent Cursor + node ast.Node + typ interface{} // the type of the node + index int // position of any of the sub types. 
+ replaced bool +} + +func newCursor(parent Cursor, n ast.Node, typ interface{}) *cursor { + return &cursor{ + parent: parent, + typ: typ, + node: n, + index: -1, + } +} + +func fileInfo(c Cursor) (info *info) { + for ; c != nil; c = c.Parent() { + if i := c.self().file; i != nil { + return i + } + } + return nil +} + +func (c *cursor) self() *cursor { return c } +func (c *cursor) Parent() Cursor { return c.parent } +func (c *cursor) Index() int { return c.index } +func (c *cursor) Node() ast.Node { return c.node } + +func (c *cursor) Import(importPath string) *ast.Ident { + info := fileInfo(c) + if info == nil { + return nil + } + + name := ImportPathName(importPath) + + // TODO: come up with something much better. + // For instance, hoist the uniquer form cue/export.go to + // here and make export.go use this. + hash := fnv.New32() + name += hex.EncodeToString(hash.Sum([]byte(importPath)))[:6] + + spec := insertImport(&info.current.decls, &ast.ImportSpec{ + Name: ast.NewIdent(name), + Path: ast.NewString(importPath), + }) + + ident := &ast.Ident{Node: spec} // Name is set later. + info.importPatch = append(info.importPatch, ident) + + ident.Name = name + + return ident +} + +func (c *cursor) Replace(n ast.Node) { + // panic if the value cannot convert to the original type. + reflect.ValueOf(n).Convert(reflect.TypeOf(c.typ).Elem()) + if ast.Comments(n) != nil { + CopyComments(n, c.node) + } + if r, ok := n.(recursive); ok { + n = r.Node + } else { + c.replaced = true + } + c.node = n +} + +func (c *cursor) InsertAfter(n ast.Node) { panic("unsupported") } +func (c *cursor) InsertBefore(n ast.Node) { panic("unsupported") } +func (c *cursor) Delete() { panic("unsupported") } + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). 
If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. +// +func Apply(node ast.Node, before, after func(Cursor) bool) ast.Node { + apply(&applier{before: before, after: after}, nil, &node) + return node +} + +// A applyVisitor's before method is invoked for each node encountered by Walk. +// If the result applyVisitor w is true, Walk visits each of the children +// of node with the applyVisitor w, followed by a call of w.After. +type applyVisitor interface { + Before(Cursor) applyVisitor + After(Cursor) bool +} + +// Helper functions for common node lists. They may be empty. 
+ +func applyExprList(v applyVisitor, parent Cursor, ptr interface{}, list []ast.Expr) { + c := newCursor(parent, nil, nil) + for i, x := range list { + c.index = i + c.node = x + c.typ = &list[i] + applyCursor(v, c) + if x != c.node { + list[i] = c.node.(ast.Expr) + } + } +} + +type declsCursor struct { + *cursor + decls, after, process []ast.Decl + delete bool +} + +func (c *declsCursor) InsertAfter(n ast.Node) { + if r, ok := n.(recursive); ok { + n = r.Node + c.process = append(c.process, n.(ast.Decl)) + } + c.after = append(c.after, n.(ast.Decl)) +} + +func (c *declsCursor) InsertBefore(n ast.Node) { + if r, ok := n.(recursive); ok { + n = r.Node + c.process = append(c.process, n.(ast.Decl)) + } + c.decls = append(c.decls, n.(ast.Decl)) +} + +func (c *declsCursor) Delete() { c.delete = true } + +func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl { + c := &declsCursor{ + cursor: newCursor(parent, nil, nil), + decls: make([]ast.Decl, 0, len(list)), + } + if file, ok := parent.Node().(*ast.File); ok { + c.cursor.file = &info{f: file, current: c} + } + for i, x := range list { + c.node = x + c.typ = &list[i] + applyCursor(v, c) + if !c.delete { + c.decls = append(c.decls, c.node.(ast.Decl)) + } + c.delete = false + for i := 0; i < len(c.process); i++ { + x := c.process[i] + c.node = x + c.typ = &c.process[i] + applyCursor(v, c) + if c.delete { + panic("cannot delete a node that was added with InsertBefore or InsertAfter") + } + } + c.decls = append(c.decls, c.after...) + c.after = c.after[:0] + c.process = c.process[:0] + } + + // TODO: ultimately, programmatically linked nodes have to be resolved + // at the end. 
+ // if info := c.cursor.file; info != nil { + // done := map[*ast.ImportSpec]bool{} + // for _, ident := range info.importPatch { + // spec := ident.Node.(*ast.ImportSpec) + // if done[spec] { + // continue + // } + // done[spec] = true + + // path, _ := strconv.Unquote(spec.Path) + + // ident.Name = + // } + // } + + return c.decls +} + +func apply(v applyVisitor, parent Cursor, nodePtr interface{}) { + res := reflect.Indirect(reflect.ValueOf(nodePtr)) + n := res.Interface() + node := n.(ast.Node) + c := newCursor(parent, node, nodePtr) + applyCursor(v, c) + if node != c.node { + res.Set(reflect.ValueOf(c.node)) + } +} + +// applyCursor traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, apply is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +func applyCursor(v applyVisitor, c Cursor) { + if v = v.Before(c); v == nil { + return + } + + node := c.Node() + + // TODO: record the comment groups and interleave with the values like for + // parsing and printing? 
+ comments := node.Comments() + for _, cm := range comments { + apply(v, c, &cm) + } + + // apply children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + for _, cg := range n.List { + apply(v, c, &cg) + } + + case *ast.Attribute: + // nothing to do + + case *ast.Field: + apply(v, c, &n.Label) + if n.Value != nil { + apply(v, c, &n.Value) + } + for _, a := range n.Attrs { + apply(v, c, &a) + } + + case *ast.StructLit: + n.Elts = applyDeclList(v, c, n.Elts) + + // Expressions + case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Interpolation: + applyExprList(v, c, &n, n.Elts) + + case *ast.ListLit: + applyExprList(v, c, &n, n.Elts) + + case *ast.Ellipsis: + if n.Type != nil { + apply(v, c, &n.Type) + } + + case *ast.ParenExpr: + apply(v, c, &n.X) + + case *ast.SelectorExpr: + apply(v, c, &n.X) + apply(v, c, &n.Sel) + + case *ast.IndexExpr: + apply(v, c, &n.X) + apply(v, c, &n.Index) + + case *ast.SliceExpr: + apply(v, c, &n.X) + if n.Low != nil { + apply(v, c, &n.Low) + } + if n.High != nil { + apply(v, c, &n.High) + } + + case *ast.CallExpr: + apply(v, c, &n.Fun) + applyExprList(v, c, &n, n.Args) + + case *ast.UnaryExpr: + apply(v, c, &n.X) + + case *ast.BinaryExpr: + apply(v, c, &n.X) + apply(v, c, &n.Y) + + // Declarations + case *ast.ImportSpec: + if n.Name != nil { + apply(v, c, &n.Name) + } + apply(v, c, &n.Path) + + case *ast.BadDecl: + // nothing to do + + case *ast.ImportDecl: + for _, s := range n.Specs { + apply(v, c, &s) + } + + case *ast.EmbedDecl: + apply(v, c, &n.Expr) + + case *ast.LetClause: + apply(v, c, &n.Ident) + apply(v, c, &n.Expr) + + case *ast.Alias: + apply(v, c, &n.Ident) + apply(v, c, &n.Expr) + + case *ast.Comprehension: + clauses := n.Clauses + for i := range n.Clauses { + apply(v, c, &clauses[i]) + } + apply(v, c, &n.Value) + + 
// Files and packages + case *ast.File: + n.Decls = applyDeclList(v, c, n.Decls) + + case *ast.Package: + apply(v, c, &n.Name) + + case *ast.ForClause: + if n.Key != nil { + apply(v, c, &n.Key) + } + apply(v, c, &n.Value) + apply(v, c, &n.Source) + + case *ast.IfClause: + apply(v, c, &n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(c) +} + +type applier struct { + before func(Cursor) bool + after func(Cursor) bool + + commentStack []commentFrame + current commentFrame +} + +type commentFrame struct { + cg []*ast.CommentGroup + pos int8 +} + +func (f *applier) Before(c Cursor) applyVisitor { + node := c.Node() + if f.before == nil || (f.before(c) && node == c.Node()) { + f.commentStack = append(f.commentStack, f.current) + f.current = commentFrame{cg: node.Comments()} + f.visitComments(c, f.current.pos) + return f + } + return nil +} + +func (f *applier) After(c Cursor) bool { + f.visitComments(c, 127) + p := len(f.commentStack) - 1 + f.current = f.commentStack[p] + f.commentStack = f.commentStack[:p] + f.current.pos++ + if f.after != nil { + f.after(c) + } + return true +} + +func (f *applier) visitComments(p Cursor, pos int8) { + c := &f.current + for i := 0; i < len(c.cg); i++ { + cg := c.cg[i] + if cg.Position == pos { + continue + } + cursor := newCursor(p, cg, cg) + if f.before == nil || (f.before(cursor) && !cursor.replaced) { + for j, c := range cg.List { + cursor := newCursor(p, c, &c) + if f.before == nil || (f.before(cursor) && !cursor.replaced) { + if f.after != nil { + f.after(cursor) + } + } + cg.List[j] = cursor.node.(*ast.Comment) + } + if f.after != nil { + f.after(cursor) + } + } + c.cg[i] = cursor.node.(*ast.CommentGroup) + } +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/file.go b/vendor/cuelang.org/go/cue/ast/astutil/file.go new file mode 100644 index 0000000000..e060b71194 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/file.go @@ -0,0 +1,38 @@ +// Copyright 2020 CUE Authors 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// ToFile converts an expression to a File. It will create an import section for +// any of the identifiers in x that refer to an import and will unshadow +// references as appropriate. +func ToFile(x ast.Expr) (*ast.File, error) { + var f *ast.File + if st, ok := x.(*ast.StructLit); ok { + f = &ast.File{Decls: st.Elts} + } else { + ast.SetRelPos(x, token.NoSpace) + f = &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: x}}} + } + + if err := Sanitize(f); err != nil { + return nil, err + } + return f, nil +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go new file mode 100644 index 0000000000..5043e16b20 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go @@ -0,0 +1,461 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements scopes and the objects they contain. + +package astutil + +import ( + "bytes" + "fmt" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// An ErrFunc processes errors. +type ErrFunc func(pos token.Pos, msg string, args ...interface{}) + +// TODO: future development +// +// Resolution currently assigns values along the table below. This is based on +// Go's resolver and is not quite convenient for CUE's purposes. For one, CUE +// allows manually setting resolution and than call astutil.Sanitize to +// normalize the ast.File. Manually assigning resolutions according to the +// below table is rather tedious though. +// +// Instead of using the Scope and Node fields in identifiers, we suggest the +// following assignments: +// +// Reference Node // an Decl or Clause +// Ident *Ident // The identifier in References (optional) +// +// References always refers to the direct element in the scope in which the +// identifier occurs, not the final value, so: *Field, *LetClause, *ForClause, +// etc. In case Ident is defined, it must be the same pointer as the +// referencing identifier. In case it is not defined, the Name of the +// referencing identifier can be used to locate the proper identifier in the +// referenced node. +// +// The Scope field in the original design then loses its function. 
+// +// Type of reference Scope Node +// Let Clause File/Struct LetClause +// Alias declaration File/Struct Alias (deprecated) +// Illegal Reference File/Struct +// Value +// X in a: X=y Field Alias +// Fields +// X in X: y File/Struct Expr (y) +// X in X=x: y File/Struct Field +// X in X=(x): y File/Struct Field +// X in X="\(x)": y File/Struct Field +// X in [X=x]: y Field Expr (x) +// X in X=[x]: y Field Field +// +// for k, v in ForClause Ident +// let x = y LetClause Ident +// +// Fields inside lambda +// Label Field Expr +// Value Field Field +// Pkg nil ImportSpec + +// Resolve resolves all identifiers in a file. Unresolved identifiers are +// recorded in Unresolved. It will not overwrite already resolved values. +func Resolve(f *ast.File, errFn ErrFunc) { + walk(&scope{errFn: errFn, identFn: resolveIdent}, f) +} + +// Resolve resolves all identifiers in an expression. +// It will not overwrite already resolved values. +func ResolveExpr(e ast.Expr, errFn ErrFunc) { + f := &ast.File{} + walk(&scope{file: f, errFn: errFn, identFn: resolveIdent}, e) +} + +// A Scope maintains the set of named language entities declared +// in the scope and a link to the immediately surrounding (outer) +// scope. 
+// +type scope struct { + file *ast.File + outer *scope + node ast.Node + index map[string]entry + inField bool + + identFn func(s *scope, n *ast.Ident) bool + nameFn func(name string) + errFn func(p token.Pos, msg string, args ...interface{}) +} + +type entry struct { + node ast.Node + link ast.Node // Alias, LetClause, or Field +} + +func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope { + const n = 4 // initial scope capacity + s := &scope{ + file: f, + outer: outer, + node: node, + index: make(map[string]entry, n), + identFn: outer.identFn, + nameFn: outer.nameFn, + errFn: outer.errFn, + } + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + label := x.Label + + if a, ok := x.Label.(*ast.Alias); ok { + // TODO(legacy): use name := a.Ident.Name once quoted + // identifiers are no longer supported. + label, _ = a.Expr.(ast.Label) + if name, _, _ := ast.LabelName(a.Ident); name != "" { + if _, ok := label.(*ast.ListLit); !ok { + s.insert(name, x, a) + } + } + } + + // default: + name, isIdent, _ := ast.LabelName(label) + if isIdent { + v := x.Value + // Avoid interpreting value aliases at this point. 
+ if a, ok := v.(*ast.Alias); ok { + v = a.Expr + } + s.insert(name, v, x) + } + case *ast.LetClause: + name, isIdent, _ := ast.LabelName(x.Ident) + if isIdent { + s.insert(name, x, x) + } + case *ast.Alias: + name, isIdent, _ := ast.LabelName(x.Ident) + if isIdent { + s.insert(name, x, x) + } + case *ast.ImportDecl: + for _, spec := range x.Specs { + info, _ := ParseImportSpec(spec) + s.insert(info.Ident, spec, spec) + } + } + } + return s +} + +func (s *scope) isLet(n ast.Node) bool { + if _, ok := s.node.(*ast.Field); ok { + return true + } + switch n.(type) { + case *ast.LetClause, *ast.Alias, *ast.Field: + return true + } + return false +} + +func (s *scope) mustBeUnique(n ast.Node) bool { + if _, ok := s.node.(*ast.Field); ok { + return true + } + switch n.(type) { + // TODO: add *ast.ImportSpec when some implementations are moved over to + // Sanitize. + case *ast.ImportSpec, *ast.LetClause, *ast.Alias, *ast.Field: + return true + } + return false +} + +func (s *scope) insert(name string, n, link ast.Node) { + if name == "" { + return + } + if s.nameFn != nil { + s.nameFn(name) + } + // TODO: record both positions. + if outer, _, existing := s.lookup(name); existing.node != nil { + if s.isLet(n) != outer.isLet(existing.node) { + s.errFn(n.Pos(), "cannot have both alias and field with name %q in same scope", name) + return + } else if s.mustBeUnique(n) || outer.mustBeUnique(existing.node) { + if outer == s { + if _, ok := existing.node.(*ast.ImportSpec); ok { + return + // TODO: + s.errFn(n.Pos(), "conflicting declaration %s\n"+ + "\tprevious declaration at %s", + name, existing.node.Pos()) + } else { + s.errFn(n.Pos(), "alias %q redeclared in same scope", name) + } + return + } + // TODO: Should we disallow shadowing of aliases? + // This was the case, but it complicates the transition to + // square brackets. The spec says allow it. 
+ // s.errFn(n.Pos(), "alias %q already declared in enclosing scope", name) + } + } + s.index[name] = entry{node: n, link: link} +} + +func (s *scope) resolveScope(name string, node ast.Node) (scope ast.Node, e entry, ok bool) { + last := s + for s != nil { + if n, ok := s.index[name]; ok && node == n.node { + if last.node == n.node { + return nil, n, true + } + return s.node, n, true + } + s, last = s.outer, s + } + return nil, entry{}, false +} + +func (s *scope) lookup(name string) (p *scope, obj ast.Node, node entry) { + // TODO(#152): consider returning nil for obj if it is a reference to root. + // last := s + if name == "_" { + return nil, nil, entry{} + } + for s != nil { + if n, ok := s.index[name]; ok { + if _, ok := n.node.(*ast.ImportSpec); ok { + return s, nil, n + } + return s, s.node, n + } + // s, last = s.outer, s + s = s.outer + } + return nil, nil, entry{} +} + +func (s *scope) After(n ast.Node) {} +func (s *scope) Before(n ast.Node) (w visitor) { + switch x := n.(type) { + case *ast.File: + s := newScope(x, s, x, x.Decls) + // Support imports. + for _, d := range x.Decls { + walk(s, d) + } + return nil + + case *ast.StructLit: + return newScope(s.file, s, x, x.Elts) + + case *ast.Comprehension: + s = scopeClauses(s, x.Clauses) + walk(s, x.Value) + return nil + + case *ast.Field: + var n ast.Node = x.Label + alias, ok := x.Label.(*ast.Alias) + if ok { + n = alias.Expr + } + + switch label := n.(type) { + case *ast.ParenExpr: + walk(s, label) + + case *ast.Interpolation: + walk(s, label) + + case *ast.ListLit: + if len(label.Elts) != 1 { + break + } + s = newScope(s.file, s, x, nil) + if alias != nil { + if name, _, _ := ast.LabelName(alias.Ident); name != "" { + s.insert(name, x, alias) + } + } + + expr := label.Elts[0] + + if a, ok := expr.(*ast.Alias); ok { + expr = a.Expr + + // Add to current scope, instead of the value's, and allow + // references to bind to these illegally. 
+ // We need this kind of administration anyway to detect + // illegal name clashes, and it allows giving better error + // messages. This puts the burdon on clients of this library + // to detect illegal usage, though. + name, err := ast.ParseIdent(a.Ident) + if err == nil { + s.insert(name, a.Expr, a) + } + } + + ast.Walk(expr, nil, func(n ast.Node) { + if x, ok := n.(*ast.Ident); ok { + for s := s; s != nil && !s.inField; s = s.outer { + if _, ok := s.index[x.Name]; ok { + s.errFn(n.Pos(), + "reference %q in label expression refers to field against which it would be matched", x.Name) + } + } + } + }) + walk(s, expr) + } + + if n := x.Value; n != nil { + if alias, ok := x.Value.(*ast.Alias); ok { + // TODO: this should move into Before once decl attributes + // have been fully deprecated and embed attributes are introduced. + s = newScope(s.file, s, x, nil) + s.insert(alias.Ident.Name, alias, x) + n = alias.Expr + } + s.inField = true + walk(s, n) + s.inField = false + } + + return nil + + case *ast.LetClause: + // Disallow referring to the current LHS name. + name := x.Ident.Name + saved := s.index[name] + delete(s.index, name) // The same name may still appear in another scope + + if x.Expr != nil { + walk(s, x.Expr) + } + s.index[name] = saved + return nil + + case *ast.Alias: + // Disallow referring to the current LHS name. + name := x.Ident.Name + saved := s.index[name] + delete(s.index, name) // The same name may still appear in another scope + + if x.Expr != nil { + walk(s, x.Expr) + } + s.index[name] = saved + return nil + + case *ast.ImportSpec: + return nil + + case *ast.Attribute: + // TODO: tokenize attributes, resolve identifiers and store the ones + // that resolve in a list. 
+ + case *ast.SelectorExpr: + walk(s, x.X) + return nil + + case *ast.Ident: + if s.identFn(s, x) { + return nil + } + } + return s +} + +func resolveIdent(s *scope, x *ast.Ident) bool { + name, ok, _ := ast.LabelName(x) + if !ok { + // TODO: generate error + return false + } + if _, obj, node := s.lookup(name); node.node != nil { + switch { + case x.Node == nil: + x.Node = node.node + x.Scope = obj + + case x.Node == node.node: + x.Scope = obj + + default: // x.Node != node + scope, _, ok := s.resolveScope(name, x.Node) + if !ok { + s.file.Unresolved = append(s.file.Unresolved, x) + } + x.Scope = scope + } + } else { + s.file.Unresolved = append(s.file.Unresolved, x) + } + return true +} + +func scopeClauses(s *scope, clauses []ast.Clause) *scope { + for _, c := range clauses { + switch x := c.(type) { + case *ast.ForClause: + walk(s, x.Source) + s = newScope(s.file, s, x, nil) + if x.Key != nil { + name, err := ast.ParseIdent(x.Key) + if err == nil { + s.insert(name, x.Key, x) + } + } + name, err := ast.ParseIdent(x.Value) + if err == nil { + s.insert(name, x.Value, x) + } + + case *ast.LetClause: + walk(s, x.Expr) + s = newScope(s.file, s, x, nil) + name, err := ast.ParseIdent(x.Ident) + if err == nil { + s.insert(name, x.Ident, x) + } + + default: + walk(s, c) + } + } + return s +} + +// Debugging support +func (s *scope) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "scope %p {", s) + if s != nil && len(s.index) > 0 { + fmt.Fprintln(&buf) + for name := range s.index { + fmt.Fprintf(&buf, "\t%v\n", name) + } + } + fmt.Fprintf(&buf, "}\n") + return buf.String() +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go new file mode 100644 index 0000000000..061a46b6f1 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go @@ -0,0 +1,354 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "fmt" + "math/rand" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// TODO: +// - handle comprehensions +// - change field from foo to "foo" if it isn't referenced, rather than +// relying on introducing a unique alias. +// - change a predeclared identifier reference to use the __ident form, +// instead of introducing an alias. + +// Sanitize rewrites File f in place to be well formed after automated +// construction of an AST. +// +// Rewrites: +// - auto inserts imports associated with Idents +// - unshadows imports associated with idents +// - unshadows references for identifiers that were already resolved. +// +func Sanitize(f *ast.File) error { + z := &sanitizer{ + file: f, + rand: rand.New(rand.NewSource(808)), + + names: map[string]bool{}, + importMap: map[string]*ast.ImportSpec{}, + referenced: map[ast.Node]bool{}, + altMap: map[ast.Node]string{}, + } + + // Gather all names. + walk(&scope{ + errFn: z.errf, + nameFn: z.addName, + identFn: z.markUsed, + }, f) + if z.errs != nil { + return z.errs + } + + // Add imports and unshadow. + s := &scope{ + file: f, + errFn: z.errf, + identFn: z.handleIdent, + index: make(map[string]entry), + } + z.fileScope = s + walk(s, f) + if z.errs != nil { + return z.errs + } + + z.cleanImports() + + return z.errs +} + +type sanitizer struct { + file *ast.File + fileScope *scope + + rand *rand.Rand + + // names is all used names. Can be used to determine a new unique name. 
+ names map[string]bool + referenced map[ast.Node]bool + + // altMap defines an alternative name for an existing entry link (a field, + // alias or let clause). As new names are globally unique, they can be + // safely reused for any unshadowing. + altMap map[ast.Node]string + importMap map[string]*ast.ImportSpec + + errs errors.Error +} + +func (z *sanitizer) errf(p token.Pos, msg string, args ...interface{}) { + z.errs = errors.Append(z.errs, errors.Newf(p, msg, args...)) +} + +func (z *sanitizer) addName(name string) { + z.names[name] = true +} + +func (z *sanitizer) addRename(base string, n ast.Node) (alt string, new bool) { + if name, ok := z.altMap[n]; ok { + return name, false + } + + name := z.uniqueName(base, false) + z.altMap[n] = name + return name, true +} + +func (z *sanitizer) unshadow(parent ast.Node, base string, link ast.Node) string { + name, ok := z.altMap[link] + if !ok { + name = z.uniqueName(base, false) + z.altMap[link] = name + + // Insert new let clause at top to refer to a declaration in possible + // other files. + let := &ast.LetClause{ + Ident: ast.NewIdent(name), + Expr: ast.NewIdent(base), + } + + var decls *[]ast.Decl + + switch x := parent.(type) { + case *ast.File: + decls = &x.Decls + case *ast.StructLit: + decls = &x.Elts + default: + panic(fmt.Sprintf("impossible scope type %T", parent)) + } + + i := 0 + for ; i < len(*decls); i++ { + if (*decls)[i] == link { + break + } + if f, ok := (*decls)[i].(*ast.Field); ok && f.Label == link { + break + } + } + + if i > 0 { + ast.SetRelPos(let, token.NewSection) + } + + a := append((*decls)[:i:i], let) + *decls = append(a, (*decls)[i:]...) 
+ } + return name +} + +func (z *sanitizer) markUsed(s *scope, n *ast.Ident) bool { + if n.Node != nil { + return false + } + _, _, entry := s.lookup(n.String()) + z.referenced[entry.link] = true + return true +} + +func (z *sanitizer) cleanImports() { + z.file.VisitImports(func(d *ast.ImportDecl) { + k := 0 + for _, s := range d.Specs { + if _, ok := z.referenced[s]; ok { + d.Specs[k] = s + k++ + } + } + d.Specs = d.Specs[:k] + }) +} + +func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { + if n.Node == nil { + return true + } + + _, _, node := s.lookup(n.Name) + if node.node == nil { + spec, ok := n.Node.(*ast.ImportSpec) + if !ok { + // Clear node. A reference may have been moved to a different + // file. If not, it should be an error. + n.Node = nil + n.Scope = nil + return false + } + + _ = z.addImport(spec) + info, _ := ParseImportSpec(spec) + z.fileScope.insert(info.Ident, spec, spec) + return true + } + + if x, ok := n.Node.(*ast.ImportSpec); ok { + xi, _ := ParseImportSpec(x) + + if y, ok := node.node.(*ast.ImportSpec); ok { + yi, _ := ParseImportSpec(y) + if xi.ID == yi.ID { // name must be identical as a result of lookup. + z.referenced[y] = true + n.Node = x + n.Scope = nil + return false + } + } + + // Either: + // - the import is shadowed + // - an incorrect import is matched + // In all cases we need to create a new import with a unique name or + // use a previously created one. + spec := z.importMap[xi.ID] + if spec == nil { + name := z.uniqueName(xi.Ident, false) + spec = z.addImport(&ast.ImportSpec{ + Name: ast.NewIdent(name), + Path: x.Path, + }) + z.importMap[xi.ID] = spec + z.fileScope.insert(name, spec, spec) + } + + info, _ := ParseImportSpec(spec) + // TODO(apply): replace n itself directly + n.Name = info.Ident + n.Node = spec + n.Scope = nil + return false + } + + if node.node == n.Node { + return true + } + + // n.Node != node and are both not nil and n.Node is not an ImportSpec. 
+ // This means that either n.Node is illegal or shadowed. + // Look for the scope in which n.Node is defined and add an alias or let. + + parent, e, ok := s.resolveScope(n.Name, n.Node) + if !ok { + // The node isn't within a legal scope within this file. It may only + // possibly shadow a value of another file. We add a top-level let + // clause to refer to this value. + + // TODO(apply): better would be to have resolve use Apply so that we can replace + // the entire ast.Ident, rather than modifying it. + // TODO: resolve to new node or rely on another pass of Resolve? + n.Name = z.unshadow(z.file, n.Name, n) + n.Node = nil + n.Scope = nil + + return false + } + + var name string + // var isNew bool + switch x := e.link.(type) { + case *ast.Field: // referring to regular field. + name, ok = z.altMap[x] + if ok { + break + } + // If this field has not alias, introduce one with a unique name. + // If this has an alias, also introduce a new name. There is a + // possibility that the alias can be used, but it is easier to just + // assign a new name, assuming this case is rather rare. + switch y := x.Label.(type) { + case *ast.Alias: + name = z.unshadow(parent, y.Ident.Name, y) + + case *ast.Ident: + var isNew bool + name, isNew = z.addRename(y.Name, x) + if isNew { + ident := ast.NewIdent(name) + // Move formatting and comments from original label to alias + // identifier. + CopyMeta(ident, y) + ast.SetRelPos(y, token.NoRelPos) + ast.SetComments(y, nil) + x.Label = &ast.Alias{Ident: ident, Expr: y} + } + + default: + // This is an illegal reference. + return false + } + + case *ast.LetClause: + name = z.unshadow(parent, x.Ident.Name, x) + + case *ast.Alias: + name = z.unshadow(parent, x.Ident.Name, x) + + default: + panic(fmt.Sprintf("unexpected link type %T", e.link)) + } + + // TODO(apply): better would be to have resolve use Apply so that we can replace + // the entire ast.Ident, rather than modifying it. 
+ n.Name = name + n.Node = nil + n.Scope = nil + + return true +} + +// uniqueName returns a new name globally unique name of the form +// base_XX ... base_XXXXXXXXXXXXXX or _base or the same pattern with a '_' +// prefix if hidden is true. +// +// It prefers short extensions over large ones, while ensuring the likelihood of +// fast termination is high. There are at least two digits to make it visually +// clearer this concerns a generated number. +// +func (z *sanitizer) uniqueName(base string, hidden bool) string { + if hidden && !strings.HasPrefix(base, "_") { + base = "_" + base + if !z.names[base] { + z.names[base] = true + return base + } + } + + // TODO(go1.13): const mask = 0xff_ffff_ffff_ffff + const mask = 0xffffffffffffff // max bits; stay clear of int64 overflow + const shift = 4 // rate of growth + for n := int64(0x10); ; n = int64(mask&((n< 0 { + name = name[p+1:] + } + return name +} + +// ImportInfo describes the information contained in an ImportSpec. +type ImportInfo struct { + Ident string // identifier used to refer to the import + PkgName string // name of the package + ID string // full import path, including the name + Dir string // import path, excluding the name +} + +// ParseImportSpec returns the name and full path of an ImportSpec. +func ParseImportSpec(spec *ast.ImportSpec) (info ImportInfo, err error) { + str, err := strconv.Unquote(spec.Path.Value) + if err != nil { + return info, err + } + + info.ID = str + + if p := strings.LastIndexByte(str, ':'); p > 0 { + info.Dir = str[:p] + info.PkgName = str[p+1:] + } else { + info.Dir = str + info.PkgName = path.Base(str) + } + + if spec.Name != nil { + info.Ident = spec.Name.Name + } else { + info.Ident = info.PkgName + } + + return info, nil +} + +// CopyComments associates comments of one node with another. +// It may change the relative position of comments. 
+func CopyComments(to, from ast.Node) { + if from == nil { + return + } + ast.SetComments(to, from.Comments()) +} + +// CopyPosition sets the position of one node to another. +func CopyPosition(to, from ast.Node) { + if from == nil { + return + } + ast.SetPos(to, from.Pos()) +} + +// CopyMeta copies comments and position information from one node to another. +// It returns the destination node. +func CopyMeta(to, from ast.Node) ast.Node { + if from == nil { + return to + } + ast.SetComments(to, from.Comments()) + ast.SetPos(to, from.Pos()) + return to +} + +// insertImport looks up an existing import with the given name and path or will +// add spec if it doesn't exist. It returns a spec in decls matching spec. +func insertImport(decls *[]ast.Decl, spec *ast.ImportSpec) *ast.ImportSpec { + x, _ := ParseImportSpec(spec) + + a := *decls + + var imports *ast.ImportDecl + var orig *ast.ImportSpec + + p := 0 +outer: + for i := 0; i < len(a); i++ { + d := a[i] + switch t := d.(type) { + default: + break outer + + case *ast.Package: + p = i + 1 + case *ast.CommentGroup: + p = i + 1 + case *ast.Attribute: + continue + case *ast.ImportDecl: + p = i + 1 + imports = t + for _, s := range t.Specs { + y, _ := ParseImportSpec(s) + if y.ID != x.ID { + continue + } + orig = s + if x.Ident == "" || y.Ident == x.Ident { + return s + } + } + } + } + + // Import not found, add one. + if imports == nil { + imports = &ast.ImportDecl{} + preamble := append(a[:p:p], imports) + a = append(preamble, a[p:]...) 
+ *decls = a + } + + if orig != nil { + CopyComments(spec, orig) + } + imports.Specs = append(imports.Specs, spec) + ast.SetRelPos(imports.Specs[0], token.NoRelPos) + + return spec +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/walk.go b/vendor/cuelang.org/go/cue/ast/astutil/walk.go new file mode 100644 index 0000000000..2de73d6e91 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/walk.go @@ -0,0 +1,196 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "fmt" + + "cuelang.org/go/cue/ast" +) + +// TODO: use ast.Walk or adopt that version to allow visitors. + +// A visitor's before method is invoked for each node encountered by Walk. +// If the result visitor w is not nil, Walk visits each of the children +// of node with the visitor w, followed by a call of w.After. +type visitor interface { + Before(node ast.Node) (w visitor) + After(node ast.Node) +} + +// Helper functions for common node lists. They may be empty. + +func walkExprList(v visitor, list []ast.Expr) { + for _, x := range list { + walk(v, x) + } +} + +func walkDeclList(v visitor, list []ast.Decl) { + for _, x := range list { + walk(v, x) + } +} + +// walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. 
If the visitor w returned by +// v.Visit(node) is not nil, walk is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +func walk(v visitor, node ast.Node) { + if v = v.Before(node); v == nil { + return + } + + // TODO: record the comment groups and interleave with the values like for + // parsing and printing? + for _, c := range node.Comments() { + walk(v, c) + } + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + for _, c := range n.List { + walk(v, c) + } + + case *ast.Attribute: + // nothing to do + + case *ast.Field: + walk(v, n.Label) + if n.Value != nil { + walk(v, n.Value) + } + for _, a := range n.Attrs { + walk(v, a) + } + + case *ast.StructLit: + for _, f := range n.Elts { + walk(v, f) + } + + // Expressions + case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Interpolation: + for _, e := range n.Elts { + walk(v, e) + } + + case *ast.ListLit: + walkExprList(v, n.Elts) + + case *ast.Ellipsis: + if n.Type != nil { + walk(v, n.Type) + } + + case *ast.ParenExpr: + walk(v, n.X) + + case *ast.SelectorExpr: + walk(v, n.X) + walk(v, n.Sel) + + case *ast.IndexExpr: + walk(v, n.X) + walk(v, n.Index) + + case *ast.SliceExpr: + walk(v, n.X) + if n.Low != nil { + walk(v, n.Low) + } + if n.High != nil { + walk(v, n.High) + } + + case *ast.CallExpr: + walk(v, n.Fun) + walkExprList(v, n.Args) + + case *ast.UnaryExpr: + walk(v, n.X) + + case *ast.BinaryExpr: + walk(v, n.X) + walk(v, n.Y) + + // Declarations + case *ast.ImportSpec: + if n.Name != nil { + walk(v, n.Name) + } + walk(v, n.Path) + + case *ast.BadDecl: + // nothing to do + + case *ast.ImportDecl: + for _, s := range n.Specs { + walk(v, s) + } + + case *ast.EmbedDecl: + walk(v, n.Expr) + + case *ast.Alias: + walk(v, 
n.Ident) + walk(v, n.Expr) + + case *ast.Comprehension: + for _, c := range n.Clauses { + walk(v, c) + } + walk(v, n.Value) + + // Files and packages + case *ast.File: + walkDeclList(v, n.Decls) + + case *ast.Package: + // The package identifier isn't really an identifier. Skip it. + + case *ast.LetClause: + walk(v, n.Ident) + walk(v, n.Expr) + + case *ast.ForClause: + if n.Key != nil { + walk(v, n.Key) + } + walk(v, n.Value) + walk(v, n.Source) + + case *ast.IfClause: + walk(v, n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(node) +} diff --git a/vendor/cuelang.org/go/cue/ast/comments.go b/vendor/cuelang.org/go/cue/ast/comments.go new file mode 100644 index 0000000000..09d5402c88 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/comments.go @@ -0,0 +1,46 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// Comments returns all comments associated with a given node. +func Comments(n Node) []*CommentGroup { + c := n.commentInfo() + if c == nil { + return nil + } + return c.Comments() +} + +// AddComment adds the given comment to the node if it supports it. +// If a node does not support comments, such as for CommentGroup or Comment, +// this call has no effect. +func AddComment(n Node, cg *CommentGroup) { + c := n.commentInfo() + if c == nil { + return + } + c.AddComment(cg) +} + +// SetComments replaces all comments of n with the given set of comments. 
+// If a node does not support comments, such as for CommentGroup or Comment, +// this call has no effect. +func SetComments(n Node, cgs []*CommentGroup) { + c := n.commentInfo() + if c == nil { + return + } + c.SetComments(cgs) +} diff --git a/vendor/cuelang.org/go/cue/ast/ident.go b/vendor/cuelang.org/go/cue/ast/ident.go new file mode 100644 index 0000000000..1f400b2869 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/ident.go @@ -0,0 +1,195 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + // TODO(mpvl): Is this correct? + return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +// IsValidIdent reports whether str is a valid identifier. +func IsValidIdent(ident string) bool { + if ident == "" { + return false + } + + // TODO: use consumed again to allow #0. 
+ // consumed := false + if strings.HasPrefix(ident, "_") { + ident = ident[1:] + // consumed = true + if len(ident) == 0 { + return true + } + } + if strings.HasPrefix(ident, "#") { + ident = ident[1:] + // consumed = true + } + + // if !consumed { + if r, _ := utf8.DecodeRuneInString(ident); isDigit(r) { + return false + } + // } + + for _, r := range ident { + if isLetter(r) || isDigit(r) || r == '_' || r == '$' { + continue + } + return false + } + return true +} + +// ParseIdent unquotes a possibly quoted identifier and validates +// if the result is valid. +// +// Deprecated: quoted identifiers are deprecated. Use aliases. +func ParseIdent(n *Ident) (string, error) { + return parseIdent(n.NamePos, n.Name) +} + +func parseIdent(pos token.Pos, ident string) (string, error) { + if ident == "" { + return "", errors.Newf(pos, "empty identifier") + } + quoted := false + if ident[0] == '`' { + u, err := strconv.Unquote(ident) + if err != nil { + return "", errors.Newf(pos, "invalid quoted identifier") + } + ident = u + quoted = true + } + + p := 0 + if strings.HasPrefix(ident, "_") { + p++ + if len(ident) == 1 { + return ident, nil + } + } + if strings.HasPrefix(ident[p:], "#") { + p++ + // if len(ident) == p { + // return "", errors.Newf(pos, "invalid identifier '_#'") + // } + } + + if p == 0 || ident[p-1] == '#' { + if r, _ := utf8.DecodeRuneInString(ident[p:]); isDigit(r) { + return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r)) + } + } + + for _, r := range ident[p:] { + if isLetter(r) || isDigit(r) || r == '_' || r == '$' { + continue + } + if r == '-' && quoted { + continue + } + return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r)) + } + + return ident, nil +} + +// LabelName reports the name of a label, whether it is an identifier +// (it binds a value to a scope), and whether it is valid. +// Keywords that are allowed in label positions are interpreted accordingly. 
+// +// Examples: +// +// Label Result +// foo "foo" true nil +// true "true" true nil +// "foo" "foo" false nil +// "x-y" "x-y" false nil +// "foo "" false invalid string +// "\(x)" "" false errors.Is(err, ErrIsExpression) +// X=foo "foo" true nil +// +func LabelName(l Label) (name string, isIdent bool, err error) { + if a, ok := l.(*Alias); ok { + l, _ = a.Expr.(Label) + } + switch n := l.(type) { + case *ListLit: + // An expression, but not one that can evaluated. + return "", false, errors.Newf(l.Pos(), + "cannot reference fields with square brackets labels outside the field value") + + case *Ident: + // TODO(legacy): use name = n.Name + name, err = ParseIdent(n) + if err != nil { + return "", false, err + } + isIdent = true + // TODO(legacy): remove this return once quoted identifiers are removed. + return name, isIdent, err + + case *BasicLit: + switch n.Kind { + case token.STRING: + // Use strconv to only allow double-quoted, single-line strings. + name, err = strconv.Unquote(n.Value) + if err != nil { + err = errors.Newf(l.Pos(), "invalid") + } + + case token.NULL, token.TRUE, token.FALSE: + name = n.Value + isIdent = true + + default: + // TODO: allow numbers to be fields + // This includes interpolation and template labels. + return "", false, errors.Wrapf(ErrIsExpression, l.Pos(), + "cannot use numbers as fields") + } + + default: + // This includes interpolation and template labels. + return "", false, errors.Wrapf(ErrIsExpression, l.Pos(), + "label is an expression") + } + if !IsValidIdent(name) { + isIdent = false + } + return name, isIdent, err + +} + +// ErrIsExpression reports whether a label is an expression. +// This error is never returned directly. Use errors.Is. 
+var ErrIsExpression = errors.New("not a concrete label") diff --git a/vendor/cuelang.org/go/cue/ast/walk.go b/vendor/cuelang.org/go/cue/ast/walk.go new file mode 100644 index 0000000000..a23fce454d --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/walk.go @@ -0,0 +1,265 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + + "cuelang.org/go/cue/token" +) + +// Walk traverses an AST in depth-first order: It starts by calling f(node); +// node must not be nil. If before returns true, Walk invokes f recursively for +// each of the non-nil children of node, followed by a call of after. Both +// functions may be nil. If before is nil, it is assumed to always return true. +// +func Walk(node Node, before func(Node) bool, after func(Node)) { + walk(&inspector{before: before, after: after}, node) +} + +// A visitor's before method is invoked for each node encountered by Walk. +// If the result visitor w is true, Walk visits each of the children +// of node with the visitor w, followed by a call of w.After. +type visitor interface { + Before(node Node) (w visitor) + After(node Node) +} + +// Helper functions for common node lists. They may be empty. 
+ +func walkExprList(v visitor, list []Expr) { + for _, x := range list { + walk(v, x) + } +} + +func walkDeclList(v visitor, list []Decl) { + for _, x := range list { + walk(v, x) + } +} + +// walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, walk is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +func walk(v visitor, node Node) { + if v = v.Before(node); v == nil { + return + } + + // TODO: record the comment groups and interleave with the values like for + // parsing and printing? + for _, c := range Comments(node) { + walk(v, c) + } + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *Comment: + // nothing to do + + case *CommentGroup: + for _, c := range n.List { + walk(v, c) + } + + case *Attribute: + // nothing to do + + case *Field: + walk(v, n.Label) + if n.Value != nil { + walk(v, n.Value) + } + for _, a := range n.Attrs { + walk(v, a) + } + + case *StructLit: + walkDeclList(v, n.Elts) + + // Expressions + case *BottomLit, *BadExpr, *Ident, *BasicLit: + // nothing to do + + case *Interpolation: + for _, e := range n.Elts { + walk(v, e) + } + + case *ListLit: + walkExprList(v, n.Elts) + + case *Ellipsis: + if n.Type != nil { + walk(v, n.Type) + } + + case *ParenExpr: + walk(v, n.X) + + case *SelectorExpr: + walk(v, n.X) + walk(v, n.Sel) + + case *IndexExpr: + walk(v, n.X) + walk(v, n.Index) + + case *SliceExpr: + walk(v, n.X) + if n.Low != nil { + walk(v, n.Low) + } + if n.High != nil { + walk(v, n.High) + } + + case *CallExpr: + walk(v, n.Fun) + walkExprList(v, n.Args) + + case *UnaryExpr: + walk(v, n.X) + + case *BinaryExpr: + walk(v, n.X) + walk(v, n.Y) + + // Declarations + case *ImportSpec: + if n.Name != nil { + walk(v, n.Name) + } + walk(v, 
n.Path) + + case *BadDecl: + // nothing to do + + case *ImportDecl: + for _, s := range n.Specs { + walk(v, s) + } + + case *EmbedDecl: + walk(v, n.Expr) + + case *LetClause: + walk(v, n.Ident) + walk(v, n.Expr) + + case *Alias: + walk(v, n.Ident) + walk(v, n.Expr) + + case *Comprehension: + for _, c := range n.Clauses { + walk(v, c) + } + walk(v, n.Value) + + // Files and packages + case *File: + walkDeclList(v, n.Decls) + + case *Package: + walk(v, n.Name) + + case *ForClause: + if n.Key != nil { + walk(v, n.Key) + } + walk(v, n.Value) + walk(v, n.Source) + + case *IfClause: + walk(v, n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(node) +} + +type inspector struct { + before func(Node) bool + after func(Node) + + commentStack []commentFrame + current commentFrame +} + +type commentFrame struct { + cg []*CommentGroup + pos int8 +} + +func (f *inspector) Before(node Node) visitor { + if f.before == nil || f.before(node) { + f.commentStack = append(f.commentStack, f.current) + f.current = commentFrame{cg: Comments(node)} + f.visitComments(f.current.pos) + return f + } + return nil +} + +func (f *inspector) After(node Node) { + f.visitComments(127) + p := len(f.commentStack) - 1 + f.current = f.commentStack[p] + f.commentStack = f.commentStack[:p] + f.current.pos++ + if f.after != nil { + f.after(node) + } +} + +func (f *inspector) Token(t token.Token) { + f.current.pos++ +} + +func (f *inspector) setPos(i int8) { + f.current.pos = i +} + +func (f *inspector) visitComments(pos int8) { + c := &f.current + for ; len(c.cg) > 0; c.cg = c.cg[1:] { + cg := c.cg[0] + if cg.Position == pos { + continue + } + if f.before == nil || f.before(cg) { + for _, c := range cg.List { + if f.before == nil || f.before(c) { + if f.after != nil { + f.after(c) + } + } + } + if f.after != nil { + f.after(cg) + } + } + } +} diff --git a/vendor/cuelang.org/go/cue/attribute.go b/vendor/cuelang.org/go/cue/attribute.go new file mode 100644 
index 0000000000..dd874d6b39 --- /dev/null +++ b/vendor/cuelang.org/go/cue/attribute.go @@ -0,0 +1,200 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "fmt" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" + "cuelang.org/go/internal/core/export" +) + +// Attribute returns the attribute data for the given key. +// The returned attribute will return an error for any of its methods if there +// is no attribute for the requested key. +func (v Value) Attribute(key string) Attribute { + // look up the attributes + if v.v == nil { + return nonExistAttr(key) + } + // look up the attributes + for _, a := range export.ExtractFieldAttrs(v.v) { + k, _ := a.Split() + if key != k { + continue + } + return newAttr(internal.FieldAttr, a) + } + + return nonExistAttr(key) +} + +func newAttr(k internal.AttrKind, a *ast.Attribute) Attribute { + key, body := a.Split() + x := internal.ParseAttrBody(token.NoPos, body) + x.Name = key + x.Kind = k + return Attribute{x} +} + +func nonExistAttr(key string) Attribute { + a := internal.NewNonExisting(key) + a.Name = key + a.Kind = internal.FieldAttr + return Attribute{a} +} + +// Attributes reports all field attributes for the Value. +// +// To retrieve attributes of multiple kinds, you can bitwise-or kinds together. +// Use ValueKind to query attributes associated with a value. 
+func (v Value) Attributes(mask AttrKind) []Attribute { + if v.v == nil { + return nil + } + + attrs := []Attribute{} + + if mask&FieldAttr != 0 { + for _, a := range export.ExtractFieldAttrs(v.v) { + attrs = append(attrs, newAttr(internal.FieldAttr, a)) + } + } + + if mask&DeclAttr != 0 { + for _, a := range export.ExtractDeclAttrs(v.v) { + attrs = append(attrs, newAttr(internal.DeclAttr, a)) + } + } + + return attrs +} + +// AttrKind indicates the location of an attribute within CUE source. +type AttrKind int + +const ( + // FieldAttr indicates a field attribute. + // foo: bar @attr() + FieldAttr AttrKind = AttrKind(internal.FieldAttr) + + // DeclAttr indicates a declaration attribute. + // foo: { + // @attr() + // } + DeclAttr AttrKind = AttrKind(internal.DeclAttr) + + // A ValueAttr is a bit mask to request any attribute that is locally + // associated with a field, instead of, for instance, an entire file. + ValueAttr AttrKind = FieldAttr | DeclAttr + + // TODO: Possible future attr kinds + // ElemAttr (is a ValueAttr) + // FileAttr (not a ValueAttr) + + // TODO: Merge: merge namesake attributes. +) + +// An Attribute contains meta data about a field. +type Attribute struct { + attr internal.Attr +} + +// Format implements fmt.Formatter. +func (a Attribute) Format(w fmt.State, verb rune) { + fmt.Fprintf(w, "@%s(%s)", a.attr.Name, a.attr.Body) +} + +var _ fmt.Formatter = &Attribute{} + +// Name returns the name of the attribute, for instance, "json" for @json(...). +func (a *Attribute) Name() string { + return a.attr.Name +} + +// Contents reports the full contents of an attribute within parentheses, so +// contents in @attr(contents). +func (a *Attribute) Contents() string { + return a.attr.Body +} + +// NumArgs reports the number of arguments parsed for this attribute. +func (a *Attribute) NumArgs() int { + return len(a.attr.Fields) +} + +// Arg reports the contents of the ith comma-separated argument of a. 
+// +// If the argument contains an unescaped equals sign, it returns a key-value +// pair. Otherwise it returns the contents in value. +func (a *Attribute) Arg(i int) (key, value string) { + f := a.attr.Fields[i] + return f.Key(), f.Value() +} + +// RawArg reports the raw contents of the ith comma-separated argument of a, +// including surrounding spaces. +func (a *Attribute) RawArg(i int) string { + return a.attr.Fields[i].Text() +} + +// Kind reports the type of location within CUE source where the attribute +// was specified. +func (a *Attribute) Kind() AttrKind { + return AttrKind(a.attr.Kind) +} + +// Err returns the error associated with this Attribute or nil if this +// attribute is valid. +func (a *Attribute) Err() error { + return a.attr.Err +} + +// String reports the possibly empty string value at the given position or +// an error the attribute is invalid or if the position does not exist. +func (a *Attribute) String(pos int) (string, error) { + return a.attr.String(pos) +} + +// Int reports the integer at the given position or an error if the attribute is +// invalid, the position does not exist, or the value at the given position is +// not an integer. +func (a *Attribute) Int(pos int) (int64, error) { + return a.attr.Int(pos) +} + +// Flag reports whether an entry with the given name exists at position pos or +// onwards or an error if the attribute is invalid or if the first pos-1 entries +// are not defined. +func (a *Attribute) Flag(pos int, key string) (bool, error) { + return a.attr.Flag(pos, key) +} + +// Lookup searches for an entry of the form key=value from position pos onwards +// and reports the value if found. It reports an error if the attribute is +// invalid or if the first pos-1 entries are not defined. +func (a *Attribute) Lookup(pos int, key string) (val string, found bool, err error) { + val, found, err = a.attr.Lookup(pos, key) + + // TODO: remove at some point. This is an ugly hack to simulate the old + // behavior of protobufs. 
+ if !found && a.attr.Name == "protobuf" && key == "type" { + val, err = a.String(1) + found = err == nil + } + return val, found, err +} diff --git a/vendor/cuelang.org/go/cue/build.go b/vendor/cuelang.org/go/cue/build.go new file mode 100644 index 0000000000..82863ee014 --- /dev/null +++ b/vendor/cuelang.org/go/cue/build.go @@ -0,0 +1,157 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/runtime" +) + +// A Runtime is used for creating CUE Values. +// +// Any operation that involves two Values or Instances should originate from +// the same Runtime. +// +// The zero value of Runtime works for legacy reasons, but +// should not be used. It may panic at some point. +// +// Deprecated: use Context. +type Runtime runtime.Runtime + +func (r *Runtime) runtime() *runtime.Runtime { + rt := (*runtime.Runtime)(r) + rt.Init() + return rt +} + +type hiddenRuntime = Runtime + +func (r *Runtime) complete(p *build.Instance, v *adt.Vertex) (*Instance, error) { + idx := r.runtime() + inst := getImportFromBuild(idx, p, v) + inst.ImportPath = p.ImportPath + if inst.Err != nil { + return nil, inst.Err + } + return inst, nil +} + +// Compile compiles the given source into an Instance. 
The source code may be +// provided as a string, byte slice, io.Reader. The name is used as the file +// name in position information. The source may import builtin packages. Use +// Build to allow importing non-builtin packages. +// +// Deprecated: use Parse or ParseBytes. The use of Instance is being phased out. +func (r *hiddenRuntime) Compile(filename string, source interface{}) (*Instance, error) { + cfg := &runtime.Config{Filename: filename} + v, p := r.runtime().Compile(cfg, source) + return r.complete(p, v) +} + +// CompileFile compiles the given source file into an Instance. The source may +// import builtin packages. Use Build to allow importing non-builtin packages. +// +// Deprecated: use BuildFile. The use of Instance is being phased out. +func (r *hiddenRuntime) CompileFile(file *ast.File) (*Instance, error) { + v, p := r.runtime().CompileFile(nil, file) + return r.complete(p, v) +} + +// CompileExpr compiles the given source expression into an Instance. The source +// may import builtin packages. Use Build to allow importing non-builtin +// packages. +// +// Deprecated: use BuildExpr. The use of Instance is being phased out. +func (r *hiddenRuntime) CompileExpr(expr ast.Expr) (*Instance, error) { + f, err := astutil.ToFile(expr) + if err != nil { + return nil, err + } + v := (*Context)(r).BuildExpr(expr) + err = v.Err() + inst := &Instance{ + index: r.runtime(), + root: v.v, + inst: &build.Instance{ + Files: []*ast.File{f}, + }, + Err: errors.Promote(err, ""), + Incomplete: err != nil, + } + return inst, err +} + +// Parse parses a CUE source value into a CUE Instance. The source code may be +// provided as a string, byte slice, or io.Reader. The name is used as the file +// name in position information. The source may import builtin packages. +// +// Deprecated: use CompileString or CompileBytes. The use of Instance is being +// phased out. 
+func (r *hiddenRuntime) Parse(name string, source interface{}) (*Instance, error) { + return r.Compile(name, source) +} + +// Build creates an Instance from the given build.Instance. A returned Instance +// may be incomplete, in which case its Err field is set. +// +// Deprecated: use Context.BuildInstance. The use of Instance is being phased +// out. +func (r *hiddenRuntime) Build(p *build.Instance) (*Instance, error) { + v, _ := r.runtime().Build(nil, p) + return r.complete(p, v) +} + +// Deprecated: use cuecontext.Context.BuildInstances. The use of Instance is +// being phased out. +func Build(instances []*build.Instance) []*Instance { + if len(instances) == 0 { + panic("cue: list of instances must not be empty") + } + var r Runtime + a, _ := r.build(instances) + return a +} + +func (r *hiddenRuntime) build(instances []*build.Instance) ([]*Instance, error) { + index := r.runtime() + + loaded := []*Instance{} + + var errs errors.Error + + for _, p := range instances { + v, _ := index.Build(nil, p) + i := getImportFromBuild(index, p, v) + errs = errors.Append(errs, i.Err) + loaded = append(loaded, i) + } + + // TODO: insert imports + return loaded, errs +} + +// FromExpr creates an instance from an expression. +// Any references must be resolved beforehand. +// +// Deprecated: use CompileExpr +func (r *hiddenRuntime) FromExpr(expr ast.Expr) (*Instance, error) { + return r.CompileFile(&ast.File{ + Decls: []ast.Decl{&ast.EmbedDecl{Expr: expr}}, + }) +} diff --git a/vendor/cuelang.org/go/cue/build/context.go b/vendor/cuelang.org/go/cue/build/context.go new file mode 100644 index 0000000000..664326eeef --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/context.go @@ -0,0 +1,128 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package build defines data types and utilities for defining CUE configuration +// instances. +// +// This package enforces the rules regarding packages and instances as defined +// in the spec, but it leaves any other details, as well as handling of modules, +// up to the implementation. +// +// A full implementation of instance loading can be found in the loader package. +// +// WARNING: this packages may change. It is fine to use load and cue, who both +// use this package. +package build + +import ( + "context" + + "cuelang.org/go/cue/ast" +) + +// A Context keeps track of state of building instances and caches work. +type Context struct { + ctxt context.Context + + loader LoadFunc + parseFunc func(str string, src interface{}) (*ast.File, error) + + initialized bool + + imports map[string]*Instance +} + +// NewInstance creates an instance for this Context. +func (c *Context) NewInstance(dir string, f LoadFunc) *Instance { + if c == nil { + c = &Context{} + } + if f == nil { + f = c.loader + } + return &Instance{ + ctxt: c, + loadFunc: f, + Dir: dir, + } +} + +// Complete finishes the initialization of an instance. All files must have +// been added with AddFile before this call. 
+func (inst *Instance) Complete() error { + if inst.done { + return inst.Err + } + inst.done = true + + err := inst.complete() + if err != nil { + inst.ReportError(err) + } + if inst.Err != nil { + inst.Incomplete = true + return inst.Err + } + return nil +} + +func (c *Context) init() { + if !c.initialized { + c.initialized = true + c.ctxt = context.Background() + c.imports = map[string]*Instance{} + } +} + +// Options: +// - certain parse modes +// - parallellism +// - error handler (allows cancelling the context) +// - file set. + +// NewContext creates a new build context. +// +// All instances must be created with a context. +func NewContext(opts ...Option) *Context { + c := &Context{} + for _, o := range opts { + o(c) + } + c.init() + return c +} + +// Option define build options. +type Option func(c *Context) + +// Loader sets parsing options. +func Loader(f LoadFunc) Option { + return func(c *Context) { c.loader = f } +} + +// ParseFile is called to read and parse each file +// when building syntax tree. +// It must be safe to call ParseFile simultaneously from multiple goroutines. +// If ParseFile is nil, the loader will uses parser.ParseFile. +// +// ParseFile should parse the source from src and use filename only for +// recording position information. +// +// An application may supply a custom implementation of ParseFile +// to change the effective file contents or the behavior of the parser, +// or to modify the syntax tree. For example, changing the backwards +// compatibility. 
+func ParseFile(f func(filename string, src interface{}) (*ast.File, error)) Option { + return func(c *Context) { c.parseFunc = f } +} diff --git a/vendor/cuelang.org/go/cue/build/doc.go b/vendor/cuelang.org/go/cue/build/doc.go new file mode 100644 index 0000000000..52421c65d8 --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package build defines collections of CUE files to build an instance. +package build // import "cuelang.org/go/cue/build" diff --git a/vendor/cuelang.org/go/cue/build/file.go b/vendor/cuelang.org/go/cue/build/file.go new file mode 100644 index 0000000000..7b22d2eda6 --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/file.go @@ -0,0 +1,86 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package build + +import "cuelang.org/go/cue/errors" + +// A File represents a file that is part of the build process. +type File struct { + Filename string `json:"filename"` + + Encoding Encoding `json:"encoding,omitempty"` + Interpretation Interpretation `json:"interpretation,omitempty"` + Form Form `json:"form,omitempty"` + Tags map[string]string `json:"tags,omitempty"` // code=go + + ExcludeReason errors.Error `json:"-"` + Source interface{} `json:"-"` // TODO: swap out with concrete type. +} + +// A Encoding indicates a file format for representing a program. +type Encoding string + +const ( + CUE Encoding = "cue" + JSON Encoding = "json" + YAML Encoding = "yaml" + JSONL Encoding = "jsonl" + Text Encoding = "text" + Binary Encoding = "binary" + Protobuf Encoding = "proto" + TextProto Encoding = "textproto" + BinaryProto Encoding = "pb" + + // TODO: + // TOML + + Code Encoding = "code" // Programming languages +) + +// An Interpretation determines how a certain program should be interpreted. +// For instance, data may be interpreted as describing a schema, which itself +// can be converted to a CUE schema. +type Interpretation string + +const ( + // Auto interprets the underlying data file as data, JSON Schema or OpenAPI, + // depending on the existence of certain marker fields. + // + // JSON Schema is identified by a top-level "$schema" field with a URL + // of the form "https?://json-schema.org/.*schema#?". + // + // OpenAPI is identified by the existence of a top-level field "openapi" + // with a major semantic version of 3, as well as the existence of + // the info.title and info.version fields. + // + // In all other cases, the underlying data is interpreted as is. + Auto Interpretation = "auto" + JSONSchema Interpretation = "jsonschema" + OpenAPI Interpretation = "openapi" + ProtobufJSON Interpretation = "pb" +) + +// A Form specifies the form in which a program should be represented. 
+type Form string + +const ( + Full Form = "full" + Schema Form = "schema" + Struct Form = "struct" + Final Form = "final" // picking default values, may be non-concrete + Graph Form = "graph" // Data only, but allow references + DAG Form = "dag" // Like graph, but don't allow cycles + Data Form = "data" // always final +) diff --git a/vendor/cuelang.org/go/cue/build/import.go b/vendor/cuelang.org/go/cue/build/import.go new file mode 100644 index 0000000000..996edb0afe --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/import.go @@ -0,0 +1,170 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build + +import ( + "sort" + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +type LoadFunc func(pos token.Pos, path string) *Instance + +type cueError = errors.Error + +type buildError struct { + cueError + inputs []token.Pos +} + +func (e *buildError) InputPositions() []token.Pos { + return e.inputs +} + +func (inst *Instance) complete() errors.Error { + // TODO: handle case-insensitive collisions. 
+ // dir := inst.Dir + // names := []string{} + // for _, src := range sources { + // names = append(names, src.path) + // } + // f1, f2 := str.FoldDup(names) + // if f1 != "" { + // return nil, fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2) + // } + + var ( + c = inst.ctxt + imported = map[string][]token.Pos{} + ) + + for _, f := range inst.Files { + for _, decl := range f.Decls { + d, ok := decl.(*ast.ImportDecl) + if !ok { + continue + } + for _, spec := range d.Specs { + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + inst.Err = errors.Append(inst.Err, + errors.Newf( + spec.Path.Pos(), + "%s: parser returned invalid quoted string: <%s>", + f.Filename, quoted)) + } + imported[path] = append(imported[path], spec.Pos()) + } + } + } + + paths := make([]string, 0, len(imported)) + for path := range imported { + paths = append(paths, path) + if path == "" { + return &buildError{ + errors.Newf(token.NoPos, "empty import path"), + imported[path], + } + } + } + + sort.Strings(paths) + + if inst.loadFunc != nil { + for i, path := range paths { + isLocal := IsLocalImport(path) + if isLocal { + // path = dirToImportPath(filepath.Join(dir, path)) + } + + imp := c.imports[path] + if imp == nil { + pos := token.NoPos + if len(imported[path]) > 0 { + pos = imported[path][0] + } + imp = inst.loadFunc(pos, path) + if imp == nil { + continue + } + if imp.Err != nil { + return errors.Wrapf(imp.Err, pos, "import failed") + } + imp.ImportPath = path + // imp.parent = inst + c.imports[path] = imp + // imp.parent = nil + } else if imp.parent != nil { + // TODO: report a standard cycle message. 
+ // cycle is now handled explicitly in loader + } + paths[i] = imp.ImportPath + + inst.addImport(imp) + if imp.Incomplete { + inst.Incomplete = true + } + } + } + + inst.ImportPaths = paths + inst.ImportPos = imported + + // Build full dependencies + deps := make(map[string]*Instance) + var q []*Instance + q = append(q, inst.Imports...) + for i := 0; i < len(q); i++ { + p1 := q[i] + path := p1.ImportPath + // The same import path could produce an error or not, + // depending on what tries to import it. + // Prefer to record entries with errors, so we can report them. + // p0 := deps[path] + // if err0, err1 := lastError(p0), lastError(p1); p0 == nil || err1 != nil && (err0 == nil || len(err0.ImportStack) > len(err1.ImportStack)) { + // deps[path] = p1 + // for _, p2 := range p1.Imports { + // if deps[p2.ImportPath] != p2 { + // q = append(q, p2) + // } + // } + // } + if _, ok := deps[path]; !ok { + deps[path] = p1 + } + } + inst.Deps = make([]string, 0, len(deps)) + for dep := range deps { + inst.Deps = append(inst.Deps, dep) + } + sort.Strings(inst.Deps) + + for _, dep := range inst.Deps { + p1 := deps[dep] + if p1 == nil { + panic("impossible: missing entry in package cache for " + dep + " imported by " + inst.ImportPath) + } + if p1.Err != nil { + inst.DepsErrors = append(inst.DepsErrors, p1.Err) + } + } + + return nil +} diff --git a/vendor/cuelang.org/go/cue/build/instance.go b/vendor/cuelang.org/go/cue/build/instance.go new file mode 100644 index 0000000000..cc0abb8ae0 --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/instance.go @@ -0,0 +1,287 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build + +import ( + "fmt" + pathpkg "path" + "path/filepath" + "strings" + "unicode" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// An Instance describes the collection of files, and its imports, necessary +// to build a CUE instance. +// +// A typical way to create an Instance is to use the cue/load package. +type Instance struct { + ctxt *Context + + BuildFiles []*File // files to be included in the build + IgnoredFiles []*File // files excluded for this build + OrphanedFiles []*File // recognized file formats not part of any build + InvalidFiles []*File // could not parse these files + UnknownFiles []*File // unknown file types + + User bool // True if package was created from individual files. + + // Files contains the AST for all files part of this instance. + // TODO: the intent is to deprecate this in favor of BuildFiles. + Files []*ast.File + + loadFunc LoadFunc + done bool + + // PkgName is the name specified in the package clause. + PkgName string + hasName bool + + // ImportPath returns the unique path to identify an imported instance. + // + // Instances created with NewInstance do not have an import path. + ImportPath string + + // Imports lists the instances of all direct imports of this instance. + Imports []*Instance + + // The Err for loading this package or nil on success. This does not + // include any errors of dependencies. 
Incomplete will be set if there + // were any errors in dependencies. + Err errors.Error + + parent *Instance // TODO: for cycle detection + + // The following fields are for informative purposes and are not used by + // the cue package to create an instance. + + // DisplayPath is a user-friendly version of the package or import path. + DisplayPath string + + // Module defines the module name of a package. It must be defined if + // the packages within the directory structure of the module are to be + // imported by other packages, including those within the module. + Module string + + // Root is the root of the directory hierarchy, it may be "" if this an + // instance has no imports. + // If Module != "", this corresponds to the module root. + // Root/pkg is the directory that holds third-party packages. + Root string // root directory of hierarchy ("" if unknown) + + // Dir is the package directory. A package may also include files from + // ancestor directories, up to the module file. + Dir string + + // NOTICE: the below tags may change in the future. + + // ImportComment is the path in the import comment on the package statement. + ImportComment string `api:"alpha"` + + // AllTags are the build tags that can influence file selection in this + // directory. + AllTags []string `api:"alpha"` + + // Incomplete reports whether any dependencies had an error. + Incomplete bool `api:"alpha"` + + // Dependencies + // ImportPaths gives the transitive dependencies of all imports. + ImportPaths []string `api:"alpha"` + ImportPos map[string][]token.Pos `api:"alpha"` // line information for Imports + + Deps []string `api:"alpha"` + DepsErrors []error `api:"alpha"` + Match []string `api:"alpha"` +} + +// RelPath reports the path of f relative to the root of the instance's module +// directory. The full path is returned if a relative path could not be found. 
+func (inst *Instance) RelPath(f *File) string { + p, err := filepath.Rel(inst.Root, f.Filename) + if err != nil { + return f.Filename + } + return p +} + +// ID returns the package ID unique for this module. +func (inst *Instance) ID() string { + if s := inst.ImportPath; s != "" { + return s + } + if inst.PkgName == "" { + return "_" + } + s := fmt.Sprintf("%s:%s", inst.Module, inst.PkgName) + return s +} + +// Dependencies reports all Instances on which this instance depends. +func (inst *Instance) Dependencies() []*Instance { + // TODO: as cyclic dependencies are not allowed, we could just not check. + // Do for safety now and remove later if needed. + return appendDependencies(nil, inst, map[*Instance]bool{}) +} + +func appendDependencies(a []*Instance, inst *Instance, done map[*Instance]bool) []*Instance { + for _, d := range inst.Imports { + if done[d] { + continue + } + a = append(a, d) + done[d] = true + a = appendDependencies(a, d, done) + } + return a +} + +// Abs converts relative path used in the one of the file fields to an +// absolute one. +func (inst *Instance) Abs(path string) string { + if filepath.IsAbs(path) { + return path + } + return filepath.Join(inst.Root, path) +} + +func (inst *Instance) setPkg(pkg string) bool { + if !inst.hasName { + inst.hasName = true + inst.PkgName = pkg + return true + } + return false +} + +// ReportError reports an error processing this instance. +func (inst *Instance) ReportError(err errors.Error) { + inst.Err = errors.Append(inst.Err, err) +} + +// Context defines the build context for this instance. All files defined +// in Syntax as well as all imported instances must be created using the +// same build context. 
+func (inst *Instance) Context() *Context { + return inst.ctxt +} + +func (inst *Instance) parse(name string, src interface{}) (*ast.File, error) { + if inst.ctxt != nil && inst.ctxt.parseFunc != nil { + return inst.ctxt.parseFunc(name, src) + } + return parser.ParseFile(name, src, parser.ParseComments) +} + +// LookupImport defines a mapping from an ImportSpec's ImportPath to Instance. +func (inst *Instance) LookupImport(path string) *Instance { + path = inst.expandPath(path) + for _, inst := range inst.Imports { + if inst.ImportPath == path { + return inst + } + } + return nil +} + +func (inst *Instance) addImport(imp *Instance) { + for _, inst := range inst.Imports { + if inst.ImportPath == imp.ImportPath { + if inst != imp { + panic("import added multiple times with different instances") + } + return + } + } + inst.Imports = append(inst.Imports, imp) +} + +// AddFile adds the file with the given name to the list of files for this +// instance. The file may be loaded from the cache of the instance's context. +// It does not process the file's imports. The package name of the file must +// match the package name of the instance. +// +// Deprecated: use AddSyntax or wait for this to be renamed using a new +// signature. +func (inst *Instance) AddFile(filename string, src interface{}) error { + file, err := inst.parse(filename, src) + if err != nil { + // should always be an errors.List, but just in case. + err := errors.Promote(err, "error adding file") + inst.ReportError(err) + return err + } + + return inst.AddSyntax(file) +} + +// AddSyntax adds the given file to list of files for this instance. The package +// name of the file must match the package name of the instance. 
+func (inst *Instance) AddSyntax(file *ast.File) errors.Error { + astutil.Resolve(file, func(pos token.Pos, msg string, args ...interface{}) { + inst.Err = errors.Append(inst.Err, errors.Newf(pos, msg, args...)) + }) + _, pkg, pos := internal.PackageInfo(file) + if pkg != "" && pkg != "_" && !inst.setPkg(pkg) && pkg != inst.PkgName { + err := errors.Newf(pos, + "package name %q conflicts with previous package name %q", + pkg, inst.PkgName) + inst.ReportError(err) + return err + } + inst.Files = append(inst.Files, file) + return nil +} + +func (inst *Instance) expandPath(path string) string { + isLocal := IsLocalImport(path) + if isLocal { + path = dirToImportPath(filepath.Join(inst.Dir, path)) + } + return path +} + +// dirToImportPath returns the pseudo-import path we use for a package +// outside the CUE path. It begins with _/ and then contains the full path +// to the directory. If the package lives in c:\home\gopher\my\pkg then +// the pseudo-import path is _/c_/home/gopher/my/pkg. +// Using a pseudo-import path like this makes the ./ imports no longer +// a special case, so that all the code to deal with ordinary imports works +// automatically. +func dirToImportPath(dir string) string { + return pathpkg.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir))) +} + +func makeImportValid(r rune) rune { + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return '_' + } + return r +} + +// IsLocalImport reports whether the import path is +// a local import path, like ".", "..", "./foo", or "../foo". +func IsLocalImport(path string) bool { + return path == "." || path == ".." 
|| + strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") +} diff --git a/vendor/cuelang.org/go/cue/builtin.go b/vendor/cuelang.org/go/cue/builtin.go new file mode 100644 index 0000000000..74aa56bbf7 --- /dev/null +++ b/vendor/cuelang.org/go/cue/builtin.go @@ -0,0 +1,31 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" +) + +func pos(n adt.Node) (p token.Pos) { + if n == nil { + return + } + src := n.Source() + if src == nil { + return + } + return src.Pos() +} diff --git a/vendor/cuelang.org/go/cue/builtinutil.go b/vendor/cuelang.org/go/cue/builtinutil.go new file mode 100644 index 0000000000..2bfd8adf74 --- /dev/null +++ b/vendor/cuelang.org/go/cue/builtinutil.go @@ -0,0 +1,45 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cue + +// TODO: this code could be generated, but currently isn't. + +type valueSorter struct { + a []Value + cmp Value + err error +} + +func (s *valueSorter) ret() ([]Value, error) { + if s.err != nil { + return nil, s.err + } + // The input slice is already a copy and that we can modify it safely. + return s.a, nil +} + +func (s *valueSorter) Len() int { return len(s.a) } +func (s *valueSorter) Swap(i, j int) { s.a[i], s.a[j] = s.a[j], s.a[i] } +func (s *valueSorter) Less(i, j int) bool { + v := s.cmp.Fill(s.a[i], "x") + v = v.Fill(s.a[j], "y") + + isLess, err := v.Lookup("less").Bool() + if err != nil && s.err == nil { + s.err = err + return true + } + return isLess +} diff --git a/vendor/cuelang.org/go/cue/context.go b/vendor/cuelang.org/go/cue/context.go new file mode 100644 index 0000000000..3da628f581 --- /dev/null +++ b/vendor/cuelang.org/go/cue/context.go @@ -0,0 +1,473 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/compile" + "cuelang.org/go/internal/core/convert" + "cuelang.org/go/internal/core/debug" + "cuelang.org/go/internal/core/eval" + "cuelang.org/go/internal/core/runtime" +) + +// A Context is used for creating CUE Values. 
+// +// A Context keeps track of loaded instances, indices of internal +// representations of values, and defines the set of supported builtins. Any +// operation that involves two Values should originate from the same Context. +// +// Use +// +// ctx := cuecontext.New() +// +// to create a new Context. +type Context runtime.Runtime + +func (c *Context) runtime() *runtime.Runtime { + rt := (*runtime.Runtime)(c) + return rt +} + +func (c *Context) ctx() *adt.OpContext { + return newContext(c.runtime()) +} + +// Context reports the Context with which this value was created. +func (v Value) Context() *Context { + return (*Context)(v.idx) +} + +// A BuildOption defines options for the various build-related methods of +// Context. +type BuildOption func(o *runtime.Config) + +// Scope defines a context in which to resolve unresolved identifiers. +// +// Only one scope may be given. It panics if more than one scope is given +// or if the Context in which scope was created differs from the one where +// this option is used. +func Scope(scope Value) BuildOption { + return func(o *runtime.Config) { + if o.Runtime != scope.idx { + panic("incompatible runtime") + } + if o.Scope != nil { + panic("more than one scope is given") + } + o.Scope = valueScope(scope) + } +} + +// Filename assigns a filename to parsed content. +func Filename(filename string) BuildOption { + return func(o *runtime.Config) { o.Filename = filename } +} + +// ImportPath defines the import path to use for building CUE. The import path +// influences the scope in which identifiers occurring in the input CUE are +// defined. Passing the empty string is equal to not specifying this option. +// +// This option is typically not necessary when building using a build.Instance, +// but takes precedence otherwise. 
+func ImportPath(path string) BuildOption { + return func(o *runtime.Config) { o.ImportPath = path } +} + +// InferBuiltins allows unresolved references to bind to builtin packages with a +// unique package name. +// +// This option is intended for evaluating expressions in a context where import +// statements cannot be used. It is not recommended to use this for evaluating +// CUE files. +func InferBuiltins(elide bool) BuildOption { + return func(o *runtime.Config) { + o.Imports = func(x *ast.Ident) (pkgPath string) { + return o.Runtime.BuiltinPackagePath(x.Name) + } + } +} + +func (c *Context) parseOptions(options []BuildOption) (cfg runtime.Config) { + cfg.Runtime = (*runtime.Runtime)(c) + for _, f := range options { + f(&cfg) + } + return cfg +} + +// BuildInstance creates a Value from the given build.Instance. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +func (c *Context) BuildInstance(i *build.Instance, options ...BuildOption) Value { + cfg := c.parseOptions(options) + v, err := c.runtime().Build(&cfg, i) + if err != nil { + return c.makeError(err) + } + return c.make(v) +} + +func (c *Context) makeError(err errors.Error) Value { + b := &adt.Bottom{Err: err} + node := &adt.Vertex{BaseValue: b} + node.UpdateStatus(adt.Finalized) + node.AddConjunct(adt.MakeRootConjunct(nil, b)) + return c.make(node) +} + +// BuildInstances creates a Value for each of the given instances and reports +// the combined errors or nil if there were no errors. +func (c *Context) BuildInstances(instances []*build.Instance) ([]Value, error) { + var errs errors.Error + var a []Value + for _, b := range instances { + v, err := c.runtime().Build(nil, b) + if err != nil { + errs = errors.Append(errs, err) + a = append(a, c.makeError(err)) + } else { + a = append(a, c.make(v)) + } + } + return a, errs +} + +// BuildFile creates a Value from f. 
+// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +func (c *Context) BuildFile(f *ast.File, options ...BuildOption) Value { + cfg := c.parseOptions(options) + return c.compile(c.runtime().CompileFile(&cfg, f)) +} + +func (c *Context) compile(v *adt.Vertex, p *build.Instance) Value { + if p.Err != nil { + return c.makeError(p.Err) + } + return c.make(v) +} + +// BuildExpr creates a Value from x. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +func (c *Context) BuildExpr(x ast.Expr, options ...BuildOption) Value { + r := c.runtime() + cfg := c.parseOptions(options) + + ctx := c.ctx() + + // TODO: move to runtime?: it probably does not make sense to treat BuildExpr + // and the expression resulting from CompileString differently. + astutil.ResolveExpr(x, errFn) + + pkgPath := cfg.ImportPath + if pkgPath == "" { + pkgPath = anonymousPkg + } + + conjunct, err := compile.Expr(&cfg.Config, r, pkgPath, x) + if err != nil { + return c.makeError(err) + } + v := adt.Resolve(ctx, conjunct) + + return c.make(v) +} + +func errFn(pos token.Pos, msg string, args ...interface{}) {} + +// resolveExpr binds unresolved expressions to values in the expression or v. +func resolveExpr(ctx *adt.OpContext, v Value, x ast.Expr) adt.Value { + cfg := &compile.Config{Scope: valueScope(v)} + + astutil.ResolveExpr(x, errFn) + + c, err := compile.Expr(cfg, ctx, anonymousPkg, x) + if err != nil { + return &adt.Bottom{Err: err} + } + return adt.Resolve(ctx, c) +} + +// anonymousPkg reports a package path that can never resolve to a valid package. +const anonymousPkg = "_" + +// CompileString parses and build a Value from the given source string. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. 
+func (c *Context) CompileString(src string, options ...BuildOption) Value { + cfg := c.parseOptions(options) + return c.compile(c.runtime().Compile(&cfg, src)) +} + +// CompileBytes parses and build a Value from the given source bytes. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +func (c *Context) CompileBytes(b []byte, options ...BuildOption) Value { + cfg := c.parseOptions(options) + return c.compile(c.runtime().Compile(&cfg, b)) +} + +// TODO: fs.FS or custom wrapper? +// // CompileFile parses and build a Value from the given source bytes. +// // +// // The returned Value will represent an error, accessible through Err, if any +// // error occurred. +// func (c *Context) CompileFile(f fs.File, options ...BuildOption) Value { +// b, err := io.ReadAll(f) +// if err != nil { +// return c.makeError(errors.Promote(err, "parsing file system file")) +// } +// return c.compile(c.runtime().Compile("", b)) +// } + +func (c *Context) make(v *adt.Vertex) Value { + return newValueRoot(c.runtime(), newContext(c.runtime()), v) +} + +// An EncodeOption defines options for the various encoding-related methods of +// Context. +type EncodeOption func(*encodeOptions) + +type encodeOptions struct { + nilIsTop bool +} + +func (o *encodeOptions) process(option []EncodeOption) { + for _, f := range option { + f(o) + } +} + +// NilIsAny indicates whether a nil value is interpreted as null or _. +// +// The default is to interpret nil as _. +func NilIsAny(isAny bool) EncodeOption { + return func(o *encodeOptions) { o.nilIsTop = isAny } +} + +// Encode converts a Go value to a CUE value. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +// +// Encode traverses the value v recursively. If an encountered value implements +// the json.Marshaler interface and is not a nil pointer, Encode calls its +// MarshalJSON method to produce JSON and convert that to CUE instead. 
If no +// MarshalJSON method is present but the value implements encoding.TextMarshaler +// instead, Encode calls its MarshalText method and encodes the result as a +// string. +// +// Otherwise, Encode uses the following type-dependent default encodings: +// +// Boolean values encode as CUE booleans. +// +// Floating point, integer, and *big.Int and *big.Float values encode as CUE +// numbers. +// +// String values encode as CUE strings coerced to valid UTF-8, replacing +// sequences of invalid bytes with the Unicode replacement rune as per Unicode's +// and W3C's recommendation. +// +// Array and slice values encode as CUE lists, except that []byte encodes as a +// bytes value, and a nil slice encodes as the null. +// +// Struct values encode as CUE structs. Each exported struct field becomes a +// member of the object, using the field name as the object key, unless the +// field is omitted for one of the reasons given below. +// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. The format string +// gives the name of the field, possibly followed by a comma-separated list of +// options. The name may be empty in order to specify options without overriding +// the default field name. +// +// The "omitempty" option specifies that the field should be omitted from the +// encoding if the field has an empty value, defined as false, 0, a nil pointer, +// a nil interface value, and any empty array, slice, map, or string. +// +// See the documentation for Go's json.Marshal for more details on the field +// tags and their meaning. +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the usual Go visibility +// rules amended as described in the next paragraph. An anonymous struct field +// with a name given in its JSON tag is treated as having that name, rather than +// being anonymous. 
An anonymous struct field of interface type is treated the +// same as having that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for when deciding which +// field to encode or decode. If there are multiple fields at the same level, +// and that level is the least nested (and would therefore be the nesting level +// selected by the usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are +// considered, even if there are multiple untagged fields that would otherwise +// conflict. +// +// 2) If there is exactly one field (tagged or not according to the first rule), +// that is selected. +// +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Map values encode as CUE structs. The map's key type must either be a string, +// an integer type, or implement encoding.TextMarshaler. The map keys are sorted +// and used as CUE struct field names by applying the following rules, subject +// to the UTF-8 coercion described for string values above: +// +// - keys of any string type are used directly +// - encoding.TextMarshalers are marshaled +// - integer keys are converted to strings +// +// Pointer values encode as the value pointed to. A nil pointer encodes as the +// null CUE value. +// +// Interface values encode as the value contained in the interface. A nil +// interface value encodes as the null CUE value. The NilIsAny EncodingOption +// can be used to interpret nil as any (_) instead. +// +// Channel, complex, and function values cannot be encoded in CUE. Attempting to +// encode such a value results in the returned value being an error, accessible +// through the Err method. 
+// +func (c *Context) Encode(x interface{}, option ...EncodeOption) Value { + switch v := x.(type) { + case adt.Value: + return newValueRoot(c.runtime(), c.ctx(), v) + } + var options encodeOptions + options.process(option) + + ctx := c.ctx() + // TODO: is true the right default? + expr := convert.GoValueToValue(ctx, x, options.nilIsTop) + n := &adt.Vertex{} + n.AddConjunct(adt.MakeRootConjunct(nil, expr)) + n.Finalize(ctx) + return c.make(n) +} + +// Encode converts a Go type to a CUE value. +// +// The returned Value will represent an error, accessible through Err, if any +// error occurred. +func (c *Context) EncodeType(x interface{}, option ...EncodeOption) Value { + switch v := x.(type) { + case *adt.Vertex: + return c.make(v) + } + + ctx := c.ctx() + expr, err := convert.GoTypeToExpr(ctx, x) + if err != nil { + return c.makeError(err) + } + n := &adt.Vertex{} + n.AddConjunct(adt.MakeRootConjunct(nil, expr)) + n.Finalize(ctx) + return c.make(n) +} + +// NewList creates a Value that is a list of the given values. +// +// All Values must be created by c. +func (c *Context) NewList(v ...Value) Value { + a := make([]adt.Value, len(v)) + for i, x := range v { + if x.idx != (*runtime.Runtime)(c) { + panic("values must be from same Context") + } + a[i] = x.v + } + return c.make(c.ctx().NewList(a...)) +} + +// TODO: + +// func (c *Context) NewExpr(op Op, v ...Value) Value { +// return Value{} +// } + +// func (c *Context) NewValue(v ...ValueElem) Value { +// return Value{} +// } + +// func NewAttr(key string, values ...string) *Attribute { +// return &Attribute{} +// } + +// // Clear unloads all previously-loaded imports. +// func (c *Context) Clear() { +// } + +// // Values created up to the point of the Fork will be valid in both runtimes. 
+// func (c *Context) Fork() *Context { +// return nil +// } + +// type ValueElem interface { +// } + +// func NewField(sel Selector, value Value, attrs ...Attribute) ValueElem { +// return nil +// } + +// func NewDocComment(text string) ValueElem { +// return nil +// } + +// newContext returns a new evaluation context. +func newContext(idx *runtime.Runtime) *adt.OpContext { + if idx == nil { + return nil + } + return eval.NewContext(idx, nil) +} + +func debugStr(ctx *adt.OpContext, v adt.Node) string { + return debug.NodeString(ctx, v, nil) +} + +func str(c *adt.OpContext, v adt.Node) string { + return debugStr(c, v) +} + +// eval returns the evaluated value. This may not be the vertex. +// +// Deprecated: use ctx.value +func (v Value) eval(ctx *adt.OpContext) adt.Value { + if v.v == nil { + panic("undefined value") + } + x := manifest(ctx, v.v) + return x.Value() +} + +// TODO: change from Vertex to Vertex. +func manifest(ctx *adt.OpContext, v *adt.Vertex) *adt.Vertex { + v.Finalize(ctx) + return v +} diff --git a/vendor/cuelang.org/go/cue/cue.go b/vendor/cuelang.org/go/cue/cue.go new file mode 100644 index 0000000000..6f9622f644 --- /dev/null +++ b/vendor/cuelang.org/go/cue/cue.go @@ -0,0 +1,43 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cue is the main API for CUE evaluation. +// +// Value is the main type that represents CUE evaluations. Values are created +// with a cue.Context. 
Only values created from the same Context can be +// involved in the same operation. +// +// A Context defines the set of active packages, the translations of field +// names to unique codes, as well as the set of builtins. Use +// +// import "cuelang.org/go/cue/cuecontext" +// +// ctx := cuecontext.New() +// +// to obtain a context. +// +// +// Note that the following types are DEPRECATED and their usage should be +// avoided if possible: +// +// FieldInfo +// Instance +// Runtime +// Struct +// +// Many types also have deprecated methods. Code that already uses deprecated +// methods can keep using them for at least some time. We aim to provide a +// go or cue fix solution to automatically rewrite code using the new API. +// +package cue diff --git a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go new file mode 100644 index 0000000000..06080793e3 --- /dev/null +++ b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go @@ -0,0 +1,31 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cuecontext + +import ( + "cuelang.org/go/cue" + "cuelang.org/go/internal/core/runtime" + + _ "cuelang.org/go/pkg" +) + +// Option controls a build context. +type Option interface{ buildOption() } + +// New creates a new Context. 
+func New(options ...Option) *cue.Context { + r := runtime.New() + return (*cue.Context)(r) +} diff --git a/vendor/cuelang.org/go/cue/decode.go b/vendor/cuelang.org/go/cue/decode.go new file mode 100644 index 0000000000..f4adc5bd11 --- /dev/null +++ b/vendor/cuelang.org/go/cue/decode.go @@ -0,0 +1,943 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal/core/adt" +) + +// Decode initializes x with Value v. If x is a struct, it will validate the +// constraints specified in the field tags. 
+func (v Value) Decode(x interface{}) error { + var d decoder + w := reflect.ValueOf(x) + switch { + case !reflect.Indirect(w).CanSet(): + d.addErr(errors.Newf(v.Pos(), "cannot decode into unsettable value")) + + default: + if w.Kind() == reflect.Ptr { + w = w.Elem() + } + d.decode(w, v, false) + } + return d.errs +} + +type decoder struct { + errs errors.Error +} + +func (d *decoder) addErr(err error) { + if err != nil { + d.errs = errors.Append(d.errs, errors.Promote(err, "")) + } +} + +func incompleteError(v Value) errors.Error { + return &valueError{ + v: v, + err: &adt.Bottom{ + Code: adt.IncompleteError, + Err: errors.Newf(v.Pos(), + "cannot convert non-concrete value %v", v)}, + } +} + +func (d *decoder) clear(x reflect.Value) { + if x.CanSet() { + x.Set(reflect.Zero(x.Type())) + } +} + +func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { + if !x.IsValid() { + d.addErr(errors.Newf(v.Pos(), "cannot decode into invalid value")) + return + } + + v, _ = v.Default() + if v.v == nil { + d.clear(x) + return + } + + if err := v.Err(); err != nil { + d.addErr(err) + return + } + + switch x.Kind() { + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface: + // nullable types + if v.Null() == nil || !v.IsConcrete() { + d.clear(x) + return + } + + default: + // TODO: allow incomplete values. 
+ if !v.IsConcrete() { + d.addErr(incompleteError(v)) + return + } + } + + ij, it, x := indirect(x, v.Null() == nil) + + if ij != nil { + b, err := v.marshalJSON() + d.addErr(err) + d.addErr(ij.UnmarshalJSON(b)) + return + } + + if it != nil { + b, err := v.Bytes() + if err != nil { + err = errors.Wrapf(err, v.Pos(), "Decode") + d.addErr(err) + return + } + d.addErr(it.UnmarshalText(b)) + return + } + + kind := x.Kind() + + if kind == reflect.Interface { + value := d.interfaceValue(v) + x.Set(reflect.ValueOf(value)) + return + } + + switch kind { + case reflect.Ptr: + d.decode(x.Elem(), v, true) + + case reflect.Bool: + b, err := v.Bool() + d.addErr(err) + x.SetBool(b) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := v.Int64() + d.addErr(err) + if x.OverflowInt(i) { + d.addErr(errors.Newf(v.Pos(), "integer %d overflows %s", i, kind)) + break + } + x.SetInt(i) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i, err := v.Uint64() + d.addErr(err) + if x.OverflowUint(i) { + d.addErr(errors.Newf(v.Pos(), "integer %d overflows %s", i, kind)) + break + } + x.SetUint(i) + + case reflect.Float32, reflect.Float64: + f, err := v.Float64() + d.addErr(err) + if x.OverflowFloat(f) { + d.addErr(errors.Newf(v.Pos(), "float %g overflows %s", f, kind)) + break + } + x.SetFloat(f) + + case reflect.String: + s, err := v.String() + d.addErr(err) + x.SetString(s) + + case reflect.Array: + d.clear(x) + + t := x.Type() + n := x.Len() + + if t.Elem().Kind() == reflect.Uint8 && v.Kind() == BytesKind { + b, err := v.Bytes() + d.addErr(err) + for i, c := range b { + if i >= n { + break + } + x.Index(i).SetUint(uint64(c)) + } + break + } + + var a []Value + list, err := v.List() + d.addErr(err) + for list.Next() { + a = append(a, list.Value()) + } + + for i, v := range a { + if i >= n { + break + } + d.decode(x.Index(i), v, false) + } + + case reflect.Slice: + t := x.Type() + if t.Elem().Kind() == reflect.Uint8 
&& v.Kind() == BytesKind { + b, err := v.Bytes() + d.addErr(err) + x.SetBytes(b) + break + } + + var a []Value + list, err := v.List() + d.addErr(err) + for list.Next() { + a = append(a, list.Value()) + } + + switch cap := x.Cap(); { + case cap == 0, // force a non-nil list + cap < len(a): + x.Set(reflect.MakeSlice(t, len(a), len(a))) + + default: + x.SetLen(len(a)) + } + + for i, v := range a { + d.decode(x.Index(i), v, false) + } + + case reflect.Struct: + d.convertStruct(x, v) + + case reflect.Map: + d.convertMap(x, v) + + default: + d.clear(x) + } +} + +func (d *decoder) interfaceValue(v Value) (x interface{}) { + var err error + v, _ = v.Default() + switch v.Kind() { + case NullKind: + return nil + + case BoolKind: + x, err = v.Bool() + + case IntKind: + if i, err := v.Int64(); err == nil { + return int(i) + } + x, err = v.Int(nil) + + case FloatKind: + x, err = v.Float64() // or big int or + + case StringKind: + x, err = v.String() + + case BytesKind: + x, err = v.Bytes() + + case ListKind: + var a []interface{} + list, err := v.List() + d.addErr(err) + for list.Next() { + a = append(a, d.interfaceValue(list.Value())) + } + x = a + + case StructKind: + m := map[string]interface{}{} + iter, err := v.Fields() + d.addErr(err) + for iter.Next() { + m[iter.Label()] = d.interfaceValue(iter.Value()) + } + x = m + + default: + err = incompleteError(v) + } + + d.addErr(err) + return x +} + +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// convertMap keeps an existing map and overwrites any entry found in v, +// keeping other preexisting entries. +func (d *decoder) convertMap(x reflect.Value, v Value) { + // Delete existing elements + t := x.Type() + + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
+ switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.addErr(errors.Newf(v.Pos(), "unsupported key type %v", t.Key())) + return + } + } + + if x.IsNil() { + x.Set(reflect.MakeMap(t)) + } + + var mapElem reflect.Value + + iter, err := v.Fields() + d.addErr(err) + for iter.Next() { + key := iter.Label() + + var kv reflect.Value + kt := t.Key() + switch { + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + err := kv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(key)) + d.addErr(err) + kv = kv.Elem() + + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + d.addErr(err) + if reflect.Zero(kt).OverflowInt(n) { + d.addErr(errors.Newf(v.Pos(), "key integer %d overflows %s", n, kt)) + break + } + kv = reflect.ValueOf(n).Convert(kt) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + d.addErr(err) + if reflect.Zero(kt).OverflowUint(n) { + d.addErr(errors.Newf(v.Pos(), "key integer %d overflows %s", n, kt)) + break + } + kv = reflect.ValueOf(n).Convert(kt) + + default: + panic("json: Unexpected key type") // should never occur + } + } + + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + d.decode(mapElem, iter.Value(), false) + + if kv.IsValid() { + x.SetMapIndex(kv, mapElem) + } + } +} + +func (d *decoder) convertStruct(x reflect.Value, v Value) { + t := x.Type() + fields := cachedTypeFields(t) + + 
iter, err := v.Fields() + d.addErr(err) + for iter.Next() { + + var f *goField + key := iter.Label() + if i, ok := fields.nameIndex[key]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + key := []byte(key) + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + + if f == nil { + continue + } + + // Figure out field corresponding to key. + subv := x + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.addErr(errors.Newf(v.Pos(), + "cannot set embedded pointer to unexported struct: %v", + subv.Type().Elem())) + subv = reflect.Value{} + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + + // TODO: make this an option + // else if d.disallowUnknownFields { + // d.saveError(fmt.Errorf("json: unknown field %q", key)) + // } + + d.decode(subv, iter.Value(), false) + } +} + +type structFields struct { + list []goField + nameIndex map[string]int +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +// A field represents a single Go field found in a struct. 
+type goField struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + nameNonEsc string // `"` + name + `":` + nameEscHTML string // `"` + HTMLEscape(name) + `":` + + tag bool + index []int + typ reflect.Type + omitEmpty bool +} + +// byIndex sorts goField by index sequence. +type byIndex []goField + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) structFields { + // Anonymous fields to explore at the current level and the next. + current := []goField{} + next := []goField{{typ: t}} + + // Count of queued names for current level and the next. + var count, nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []goField + + // Buffer to run HTMLEscape on field names. + var nameEscBuf bytes.Buffer + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + isUnexported := sf.PkgPath != "" + if sf.Anonymous { + t := sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if isUnexported && t.Kind() != reflect.Struct { + // Ignore embedded fields of unexported non-struct types. 
+ continue + } + // Do not ignore embedded fields of unexported struct types + // since they may have exported fields. + } else if isUnexported { + // Ignore unexported non-embedded fields. + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + field := goField{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + } + field.nameBytes = []byte(field.name) + field.equalFold = foldFunc(field.nameBytes) + + // Build nameEscHTML and nameNonEsc ahead of time. + nameEscBuf.Reset() + nameEscBuf.WriteString(`"`) + json.HTMLEscape(&nameEscBuf, field.nameBytes) + nameEscBuf.WriteString(`":`) + field.nameEscHTML = nameEscBuf.String() + field.nameNonEsc = `"` + field.name + `":` + + fields = append(fields, field) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, goField{name: ft.Name(), index: index, typ: ft}) + } + } + } + } + + sort.Slice(fields, func(i, j int) bool { + x := fields + // sort field by name, breaking ties with depth, then + // breaking ties with "name came from json tag", then + // breaking ties with index sequence. 
+ if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) + }) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + nameIndex := make(map[string]int, len(fields)) + for i, field := range fields { + nameIndex[field.name] = i + } + return structFields{fields, nameIndex} +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []goField) (goField, bool) { + // The fields are sorted in increasing index-length order, then by presence of tag. + // That means that the first field is the dominant one. We need only check + // for error cases: two fields at top level, either both tagged or neither tagged. 
+ if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag { + return goField{}, false + } + return fields[0], true +} + +var fieldCache sync.Map // map[reflect.Type]structFields + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) structFields { + if f, ok := fieldCache.Load(t); ok { + return f.(structFields) + } + f, _ := fieldCache.LoadOrStore(t, typeFields(t)) + return f.(structFields) +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. 
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. 
+ // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} diff --git a/vendor/cuelang.org/go/cue/errors.go b/vendor/cuelang.org/go/cue/errors.go new file mode 100644 index 0000000000..d079b970ac --- /dev/null +++ b/vendor/cuelang.org/go/cue/errors.go @@ -0,0 +1,134 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/runtime" +) + +func (v Value) toErr(b *adt.Bottom) (err errors.Error) { + errs := errors.Errors(b.Err) + if len(errs) > 1 { + for _, e := range errs { + bb := *b + bb.Err = e + err = errors.Append(err, &valueError{v: v, err: &bb}) + } + return err + } + return &valueError{v: v, err: b} +} + +var _ errors.Error = &valueError{} + +// A valueError is returned as a result of evaluating a value. 
+type valueError struct { + v Value + err *adt.Bottom +} + +func (e *valueError) Unwrap() error { + if e.err.Err == nil { + return nil + } + return errors.Unwrap(e.err.Err) +} + +func (e *valueError) Bottom() *adt.Bottom { return e.err } + +func (e *valueError) Error() string { + return errors.String(e) +} + +func (e *valueError) Position() token.Pos { + if e.err.Err != nil { + return e.err.Err.Position() + } + src := e.err.Source() + if src == nil { + return token.NoPos + } + return src.Pos() +} + +func (e *valueError) InputPositions() []token.Pos { + if e.err.Err == nil { + return nil + } + return e.err.Err.InputPositions() +} + +func (e *valueError) Msg() (string, []interface{}) { + if e.err.Err == nil { + return "", nil + } + return e.err.Err.Msg() +} + +func (e *valueError) Path() (a []string) { + if e.err.Err != nil { + a = e.err.Err.Path() + if a != nil { + return a + } + } + return pathToStrings(e.v.Path()) +} + +var errNotExists = &adt.Bottom{ + Code: adt.IncompleteError, + NotExists: true, + Err: errors.Newf(token.NoPos, "undefined value"), +} + +func mkErr(idx *runtime.Runtime, src adt.Node, args ...interface{}) *adt.Bottom { + var e *adt.Bottom + var code adt.ErrorCode = -1 +outer: + for i, a := range args { + switch x := a.(type) { + case adt.ErrorCode: + code = x + case *adt.Bottom: + e = adt.CombineErrors(nil, e, x) + case []*adt.Bottom: + for _, b := range x { + e = adt.CombineErrors(nil, e, b) + } + case errors.Error: + e = adt.CombineErrors(nil, e, &adt.Bottom{Err: x}) + case adt.Expr: + case string: + args := args[i+1:] + // Do not expand message so that errors can be localized. 
+ pos := pos(src) + if code < 0 { + code = 0 + } + e = adt.CombineErrors(nil, e, &adt.Bottom{ + Code: code, + Err: errors.Newf(pos, x, args...), + }) + break outer + } + } + if code >= 0 { + e.Code = code + } + return e +} diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go new file mode 100644 index 0000000000..af5038c22b --- /dev/null +++ b/vendor/cuelang.org/go/cue/errors/errors.go @@ -0,0 +1,651 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errors defines shared types for handling CUE errors. +// +// The pivotal error type in CUE packages is the interface type Error. +// The information available in such errors can be most easily retrieved using +// the Path, Positions, and Print functions. +package errors // import "cuelang.org/go/cue/errors" + +import ( + "bytes" + "errors" + "fmt" + "io" + "path/filepath" + "sort" + "strings" + + "github.com/mpvl/unique" + + "cuelang.org/go/cue/token" +) + +// New is a convenience wrapper for errors.New in the core library. +// It does not return a CUE error. +func New(msg string) error { + return errors.New(msg) +} + +// Unwrap returns the result of calling the Unwrap method on err, if err +// implements Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} + +// Is reports whether any error in err's chain matches target. 
+// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if +// target is not a non-nil pointer to a type which implements error or is of +// interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// A Message implements the error interface as well as Message to allow +// internationalized messages. A Message is typically used as an embedding +// in a CUE message. +type Message struct { + format string + args []interface{} +} + +// NewMessage creates an error message for human consumption. The arguments +// are for later consumption, allowing the message to be localized at a later +// time. The passed argument list should not be modified. +func NewMessage(format string, args []interface{}) Message { + return Message{format: format, args: args} +} + +// Msg returns a printf-style format string and its arguments for human +// consumption. +func (m *Message) Msg() (format string, args []interface{}) { + return m.format, m.args +} + +func (m *Message) Error() string { + return fmt.Sprintf(m.format, m.args...) +} + +// Error is the common error message. +type Error interface { + // Position returns the primary position of an error. If multiple positions + // contribute equally, this reflects one of them. 
+ Position() token.Pos + + // InputPositions reports positions that contributed to an error, including + // the expressions resulting in the conflict, as well as values that were + // the input to this expression. + InputPositions() []token.Pos + + // Error reports the error message without position information. + Error() string + + // Path returns the path into the data tree where the error occurred. + // This path may be nil if the error is not associated with such a location. + Path() []string + + // Msg returns the unformatted error message and its arguments for human + // consumption. + Msg() (format string, args []interface{}) +} + +// Positions returns all positions returned by an error, sorted +// by relevance when possible and with duplicates removed. +func Positions(err error) []token.Pos { + e := Error(nil) + if !errors.As(err, &e) { + return nil + } + + a := make([]token.Pos, 0, 3) + + sortOffset := 0 + pos := e.Position() + if pos.IsValid() { + a = append(a, pos) + sortOffset = 1 + } + + for _, p := range e.InputPositions() { + if p.IsValid() && p != pos { + a = append(a, p) + } + } + + byPos := byPos(a[sortOffset:]) + sort.Sort(byPos) + k := unique.ToFront(byPos) + return a[:k+sortOffset] +} + +type byPos []token.Pos + +func (s *byPos) Truncate(n int) { (*s) = (*s)[:n] } +func (s byPos) Len() int { return len(s) } +func (s byPos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPos) Less(i, j int) bool { return comparePos(s[i], s[j]) == -1 } + +// Path returns the path of an Error if err is of that type. +func Path(err error) []string { + if e := Error(nil); errors.As(err, &e) { + return e.Path() + } + return nil +} + +// Newf creates an Error with the associated position and message. +func Newf(p token.Pos, format string, args ...interface{}) Error { + return &posError{ + pos: p, + Message: NewMessage(format, args), + } +} + +// Wrapf creates an Error with the associated position and message. 
The provided +// error is added for inspection context. +func Wrapf(err error, p token.Pos, format string, args ...interface{}) Error { + pErr := &posError{ + pos: p, + Message: NewMessage(format, args), + } + return Wrap(pErr, err) +} + +// Wrap creates a new error where child is a subordinate error of parent. +// If child is list of Errors, the result will itself be a list of errors +// where child is a subordinate error of each parent. +func Wrap(parent Error, child error) Error { + if child == nil { + return parent + } + a, ok := child.(list) + if !ok { + return &wrapped{parent, child} + } + b := make(list, len(a)) + for i, err := range a { + b[i] = &wrapped{parent, err} + } + return b +} + +type wrapped struct { + main Error + wrap error +} + +// Error implements the error interface. +func (e *wrapped) Error() string { + switch msg := e.main.Error(); { + case e.wrap == nil: + return msg + case msg == "": + return e.wrap.Error() + default: + return fmt.Sprintf("%s: %s", msg, e.wrap) + } +} + +func (e *wrapped) Is(target error) bool { + return Is(e.main, target) +} + +func (e *wrapped) As(target interface{}) bool { + return As(e.main, target) +} + +func (e *wrapped) Msg() (format string, args []interface{}) { + return e.main.Msg() +} + +func (e *wrapped) Path() []string { + if p := Path(e.main); p != nil { + return p + } + return Path(e.wrap) +} + +func (e *wrapped) InputPositions() []token.Pos { + return append(e.main.InputPositions(), Positions(e.wrap)...) +} + +func (e *wrapped) Position() token.Pos { + if p := e.main.Position(); p != token.NoPos { + return p + } + if wrap, ok := e.wrap.(Error); ok { + return wrap.Position() + } + return token.NoPos +} + +func (e *wrapped) Unwrap() error { return e.wrap } + +func (e *wrapped) Cause() error { return e.wrap } + +// Promote converts a regular Go error to an Error if it isn't already one. 
+func Promote(err error, msg string) Error { + switch x := err.(type) { + case Error: + return x + default: + return Wrapf(err, token.NoPos, msg) + } +} + +var _ Error = &posError{} + +// In an List, an error is represented by an *posError. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +type posError struct { + pos token.Pos + inputs []token.Pos + Message +} + +func (e *posError) Path() []string { return nil } +func (e *posError) InputPositions() []token.Pos { return e.inputs } +func (e *posError) Position() token.Pos { return e.pos } + +// Append combines two errors, flattening Lists as necessary. +func Append(a, b Error) Error { + switch x := a.(type) { + case nil: + return b + case list: + return appendToList(x, b) + } + // Preserve order of errors. + list := appendToList(nil, a) + list = appendToList(list, b) + return list +} + +// Errors reports the individual errors associated with an error, which is +// the error itself if there is only one or, if the underlying type is List, +// its individual elements. If the given error is not an Error, it will be +// promoted to one. +func Errors(err error) []Error { + switch x := err.(type) { + case nil: + return nil + case list: + return []Error(x) + case Error: + return []Error{x} + default: + return []Error{Promote(err, "")} + } +} + +func appendToList(a list, err Error) list { + switch x := err.(type) { + case nil: + return a + case list: + if a == nil { + return x + } + return append(a, x...) + default: + return append(a, err) + } +} + +// list is a list of Errors. +// The zero value for an list is an empty list ready to use. 
+type list []Error + +func (p list) Is(err, target error) bool { + for _, e := range p { + if errors.Is(e, target) { + return true + } + } + return false +} + +func (p list) As(err error, target interface{}) bool { + for _, e := range p { + if errors.As(e, target) { + return true + } + } + return false +} + +// AddNewf adds an Error with given position and error message to an List. +func (p *list) AddNewf(pos token.Pos, msg string, args ...interface{}) { + err := &posError{pos: pos, Message: Message{format: msg, args: args}} + *p = append(*p, err) +} + +// Add adds an Error with given position and error message to an List. +func (p *list) Add(err Error) { + *p = appendToList(*p, err) +} + +// Reset resets an List to no errors. +func (p *list) Reset() { *p = (*p)[:0] } + +// List implements the sort Interface. +func (p list) Len() int { return len(p) } +func (p list) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p list) Less(i, j int) bool { + if c := comparePos(p[i].Position(), p[j].Position()); c != 0 { + return c == -1 + } + // Note that it is not sufficient to simply compare file offsets because + // the offsets do not reflect modified line information (through //line + // comments). 
+ + if !equalPath(p[i].Path(), p[j].Path()) { + return lessPath(p[i].Path(), p[j].Path()) + } + return p[i].Error() < p[j].Error() +} + +func lessOrMore(isLess bool) int { + if isLess { + return -1 + } + return 1 +} + +func comparePos(a, b token.Pos) int { + if a.Filename() != b.Filename() { + return lessOrMore(a.Filename() < b.Filename()) + } + if a.Line() != b.Line() { + return lessOrMore(a.Line() < b.Line()) + } + if a.Column() != b.Column() { + return lessOrMore(a.Column() < b.Column()) + } + return 0 +} + +func lessPath(a, b []string) bool { + for i, x := range a { + if i >= len(b) { + return false + } + if x != b[i] { + return x < b[i] + } + } + return len(a) < len(b) +} + +func equalPath(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, x := range a { + if x != b[i] { + return false + } + } + return true +} + +// Sanitize sorts multiple errors and removes duplicates on a best effort basis. +// If err represents a single or no error, it returns the error as is. +func Sanitize(err Error) Error { + if l, ok := err.(list); ok && err != nil { + a := make(list, len(l)) + copy(a, l) + a.Sort() + a.RemoveMultiples() + return a + } + return err +} + +// Sort sorts an List. *posError entries are sorted by position, +// other errors are sorted by error message, and before any *posError +// entry. +// +func (p list) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an List and removes all but the first error per line. 
+func (p *list) RemoveMultiples() { + p.Sort() + var last Error + i := 0 + for _, e := range *p { + if last == nil || !approximateEqual(last, e) { + last = e + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +func approximateEqual(a, b Error) bool { + aPos := a.Position() + bPos := b.Position() + if aPos == token.NoPos || bPos == token.NoPos { + return a.Error() == b.Error() + } + return aPos.Filename() == bPos.Filename() && + aPos.Line() == bPos.Line() && + equalPath(a.Path(), b.Path()) +} + +// An List implements the error interface. +func (p list) Error() string { + format, args := p.Msg() + return fmt.Sprintf(format, args...) +} + +// Msg reports the unformatted error message for the first error, if any. +func (p list) Msg() (format string, args []interface{}) { + switch len(p) { + case 0: + return "no errors", nil + case 1: + return p[0].Msg() + } + return "%s (and %d more errors)", []interface{}{p[0], len(p) - 1} +} + +// Position reports the primary position for the first error, if any. +func (p list) Position() token.Pos { + if len(p) == 0 { + return token.NoPos + } + return p[0].Position() +} + +// InputPositions reports the input positions for the first error, if any. +func (p list) InputPositions() []token.Pos { + if len(p) == 0 { + return nil + } + return p[0].InputPositions() +} + +// Path reports the path location of the first error, if any. +func (p list) Path() []string { + if len(p) == 0 { + return nil + } + return p[0].Path() +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p list) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// A Config defines parameters for printing. +type Config struct { + // Format formats the given string and arguments and writes it to w. + // It is used for all printing. + Format func(w io.Writer, format string, args ...interface{}) + + // Cwd is the current working directory. Filename positions are taken + // relative to this path. 
+ Cwd string + + // ToSlash sets whether to use Unix paths. Mostly used for testing. + ToSlash bool +} + +// Print is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an List. Otherwise +// it prints the err string. +// +func Print(w io.Writer, err error, cfg *Config) { + if cfg == nil { + cfg = &Config{} + } + if e, ok := err.(Error); ok { + err = Sanitize(e) + } + for _, e := range Errors(err) { + printError(w, e, cfg) + } +} + +// Details is a convenience wrapper for Print to return the error text as a +// string. +func Details(err error, cfg *Config) string { + w := &bytes.Buffer{} + Print(w, err, cfg) + return w.String() +} + +// String generates a short message from a given Error. +func String(err Error) string { + w := &strings.Builder{} + writeErr(w, err) + return w.String() +} + +func writeErr(w io.Writer, err Error) { + if path := strings.Join(err.Path(), "."); path != "" { + _, _ = io.WriteString(w, path) + _, _ = io.WriteString(w, ": ") + } + + for { + u := errors.Unwrap(err) + + printed := false + msg, args := err.Msg() + if msg != "" || u == nil { // print at least something + fmt.Fprintf(w, msg, args...) + printed = true + } + + if u == nil { + break + } + + if printed { + _, _ = io.WriteString(w, ": ") + } + err, _ = u.(Error) + if err == nil { + fmt.Fprint(w, u) + break + } + } +} + +func defaultFprintf(w io.Writer, format string, args ...interface{}) { + fmt.Fprintf(w, format, args...) +} + +func printError(w io.Writer, err error, cfg *Config) { + if err == nil { + return + } + fprintf := cfg.Format + if fprintf == nil { + fprintf = defaultFprintf + } + + positions := []string{} + for _, p := range Positions(err) { + pos := p.Position() + s := pos.Filename + if cfg.Cwd != "" { + if p, err := filepath.Rel(cfg.Cwd, s); err == nil { + s = p + // Some IDEs (e.g. VSCode) only recognize a path if it start + // with a dot. This also helps to distinguish between local + // files and builtin packages. 
+ if !strings.HasPrefix(s, ".") { + s = fmt.Sprintf(".%s%s", string(filepath.Separator), s) + } + } + } + if cfg.ToSlash { + s = filepath.ToSlash(s) + } + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + positions = append(positions, s) + } + + if e, ok := err.(Error); ok { + writeErr(w, e) + } else { + fprintf(w, "%v", err) + } + + if len(positions) == 0 { + fprintf(w, "\n") + return + } + + fprintf(w, ":\n") + for _, pos := range positions { + fprintf(w, " %s\n", pos) + } +} diff --git a/vendor/cuelang.org/go/cue/format.go b/vendor/cuelang.org/go/cue/format.go new file mode 100644 index 0000000000..707a7990ef --- /dev/null +++ b/vendor/cuelang.org/go/cue/format.go @@ -0,0 +1,201 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + "fmt" + "math/big" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/format" + "cuelang.org/go/internal/core/export" +) + +// TODO: +// * allow '-' to strip outer curly braces? +// - simplify output; can be used in combination with other flags +// * advertise: +// c like v, but print comments +// a like c, but print attributes and package-local hidden fields as well + +// Format prints a CUE value. +// +// WARNING: +// although we are narrowing down the semantics, the verbs and options +// are still subject to change. 
this API is experimental although it is +// likely getting close to the final design. +// +// It recognizes the following verbs: +// +// v print CUE value +// +// The verbs support the following flags: +// # print as schema and include definitions. +// The result is printed as a self-contained file, instead of an the +// expression format. +// + evaluate: resolve defaults and error on incomplete errors +// +// Indentation can be controlled as follows: +// width indent the cue block by tab stops (e.g. %2v) +// precision convert tabs to spaces (e.g. %.2v), where +// a value of 0 means no indentation or newlines (TODO). +// +// If the value kind corresponds to one of the following Go types, the +// usual Go formatting verbs for that type can be used: +// +// Int: b,d,o,O,q,x,X +// Float: f,e,E,g,G +// String/Bytes: s,q,x,X +// +// The %v directive will be used if the type is not supported for that verb. +// +func (v Value) Format(state fmt.State, verb rune) { + if v.v == nil { + fmt.Fprint(state, "") + return + } + + switch verb { + case 'a': + formatCUE(state, v, true, true) + case 'c': + formatCUE(state, v, true, false) + case 'v': + formatCUE(state, v, false, false) + + case 'd', 'o', 'O', 'U': + var i big.Int + if _, err := v.Int(&i); err != nil { + formatCUE(state, v, false, false) + return + } + i.Format(state, verb) + + case 'f', 'e', 'E', 'g', 'G': + d, err := v.Decimal() + if err != nil { + formatCUE(state, v, false, false) + return + } + d.Format(state, verb) + + case 's', 'q': + // TODO: this drops other formatting directives + msg := "%s" + if verb == 'q' { + msg = "%q" + } + + if b, err := v.Bytes(); err == nil { + fmt.Fprintf(state, msg, b) + } else { + s := fmt.Sprintf("%+v", v) + fmt.Fprintf(state, msg, s) + } + + case 'x', 'X': + switch v.Kind() { + case StringKind, BytesKind: + b, _ := v.Bytes() + // TODO: this drops other formatting directives + msg := "%x" + if verb == 'X' { + msg = "%X" + } + fmt.Fprintf(state, msg, b) + + case IntKind, 
NumberKind: + var i big.Int + _, _ = v.Int(&i) + i.Format(state, verb) + + case FloatKind: + dec, _ := v.Decimal() + dec.Format(state, verb) + + default: + formatCUE(state, v, false, false) + } + + default: + formatCUE(state, v, false, false) + } +} + +func formatCUE(state fmt.State, v Value, showDocs, showAll bool) { + + pkgPath := v.instance().ID() + + p := *export.Simplified + + isDef := false + switch { + case state.Flag('#'): + isDef = true + p = export.Profile{ + ShowOptional: true, + ShowDefinitions: true, + ShowHidden: true, + } + + case state.Flag('+'): + p = *export.Final + fallthrough + + default: + p.ShowHidden = showAll + } + + p.ShowDocs = showDocs + p.ShowAttributes = showAll + + var n ast.Node + if isDef { + n, _ = p.Def(v.idx, pkgPath, v.v) + } else { + n, _ = p.Value(v.idx, pkgPath, v.v) + } + + formatExpr(state, n) +} + +func formatExpr(state fmt.State, n ast.Node) { + opts := make([]format.Option, 0, 3) + if state.Flag('-') { + opts = append(opts, format.Simplify()) + } + // TODO: handle verbs to allow formatting based on type: + if width, ok := state.Width(); ok { + opts = append(opts, format.IndentPrefix(width)) + } + // TODO: consider this: should tabs or spaces be the default? + if tabwidth, ok := state.Precision(); ok { + // TODO: 0 means no newlines. + opts = append(opts, + format.UseSpaces(tabwidth), + format.TabIndent(false)) + } + // TODO: consider this. + // else if state.Flag(' ') { + // opts = append(opts, + // format.UseSpaces(4), + // format.TabIndent(false)) + // } + + b, _ := format.Node(n, opts...) 
+ b = bytes.Trim(b, "\n\r") + _, _ = state.Write(b) +} diff --git a/vendor/cuelang.org/go/cue/format/format.go b/vendor/cuelang.org/go/cue/format/format.go new file mode 100644 index 0000000000..5e81eb3591 --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/format.go @@ -0,0 +1,350 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package format implements standard formatting of CUE configurations. +package format // import "cuelang.org/go/cue/format" + +// TODO: this package is in need of a rewrite. When doing so, the API should +// allow for reformatting an AST, without actually writing bytes. +// +// In essence, formatting determines the relative spacing to tokens. It should +// be possible to have an abstract implementation providing such information +// that can be used to either format or update an AST in a single walk. + +import ( + "bytes" + "fmt" + "strings" + "text/tabwriter" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" +) + +// An Option sets behavior of the formatter. +type Option func(c *config) + +// Simplify allows the formatter to simplify output, such as removing +// unnecessary quotes. +func Simplify() Option { + return func(c *config) { c.simplify = true } +} + +// UseSpaces specifies that tabs should be converted to spaces and sets the +// default tab width. 
+func UseSpaces(tabwidth int) Option { + return func(c *config) { + c.UseSpaces = true + c.Tabwidth = tabwidth + } +} + +// TabIndent specifies whether to use tabs for indentation independent of +// UseSpaces. +func TabIndent(indent bool) Option { + return func(c *config) { c.TabIndent = indent } +} + +// IndentPrefix specifies the number of tabstops to use as a prefix for every +// line. +func IndentPrefix(n int) Option { + return func(c *config) { c.Indent = n } +} + +// TODO: make public +// sortImportsOption causes import declarations to be sorted. +func sortImportsOption() Option { + return func(c *config) { c.sortImports = true } +} + +// TODO: other options: +// +// const ( +// RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored +// TabIndent // use tabs for indentation independent of UseSpaces +// UseSpaces // use spaces instead of tabs for alignment +// SourcePos // emit //line comments to preserve original source positions +// ) + +// Node formats node in canonical cue fmt style and writes the result to dst. +// +// The node type must be *ast.File, []syntax.Decl, syntax.Expr, syntax.Decl, or +// syntax.Spec. Node does not modify node. Imports are not sorted for nodes +// representing partial source files (for instance, if the node is not an +// *ast.File). +// +// The function may return early (before the entire result is written) and +// return a formatting error, for instance due to an incorrect AST. +// +func Node(node ast.Node, opt ...Option) ([]byte, error) { + cfg := newConfig(opt) + return cfg.fprint(node) +} + +// Source formats src in canonical cue fmt style and returns the result or an +// (I/O or syntax) error. src is expected to be a syntactically correct CUE +// source file, or a list of CUE declarations or statements. 
+// +// If src is a partial source file, the leading and trailing space of src is +// applied to the result (such that it has the same leading and trailing space +// as src), and the result is indented by the same amount as the first line of +// src containing code. Imports are not sorted for partial source files. +// +// Caution: Tools relying on consistent formatting based on the installed +// version of cue (for instance, such as for presubmit checks) should execute +// that cue binary instead of calling Source. +// +func Source(b []byte, opt ...Option) ([]byte, error) { + cfg := newConfig(opt) + + f, err := parser.ParseFile("", b, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("parse: %s", err) + } + + // print AST + return cfg.fprint(f) +} + +type config struct { + UseSpaces bool + TabIndent bool + Tabwidth int // default: 4 + Indent int // default: 0 (all code is indented at least by this much) + + simplify bool + sortImports bool +} + +func newConfig(opt []Option) *config { + cfg := &config{ + Tabwidth: 8, + TabIndent: true, + UseSpaces: true, + } + for _, o := range opt { + o(cfg) + } + return cfg +} + +// Config defines the output of Fprint. 
+func (cfg *config) fprint(node interface{}) (out []byte, err error) { + var p printer + p.init(cfg) + if err = printNode(node, &p); err != nil { + return p.output, err + } + + padchar := byte('\t') + if cfg.UseSpaces { + padchar = byte(' ') + } + + twmode := tabwriter.StripEscape | tabwriter.TabIndent | tabwriter.DiscardEmptyColumns + if cfg.TabIndent { + twmode |= tabwriter.TabIndent + } + + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, cfg.Tabwidth, 1, padchar, twmode) + + // write printer result via tabwriter/trimmer to output + if _, err = tw.Write(p.output); err != nil { + return + } + + err = tw.Flush() + if err != nil { + return buf.Bytes(), err + } + + b := buf.Bytes() + if !cfg.TabIndent { + b = bytes.ReplaceAll(b, []byte{'\t'}, bytes.Repeat([]byte{' '}, cfg.Tabwidth)) + } + return b, nil +} + +// A formatter walks a syntax.Node, interspersed with comments and spacing +// directives, in the order that they would occur in printed form. +type formatter struct { + *printer + + stack []frame + current frame + nestExpr int +} + +func newFormatter(p *printer) *formatter { + f := &formatter{ + printer: p, + current: frame{ + settings: settings{ + nodeSep: newline, + parentSep: newline, + }, + }, + } + return f +} + +type whiteSpace int + +const ( + ignore whiteSpace = 0 + + // write a space, or disallow it + blank whiteSpace = 1 << iota + vtab // column marker + noblank + + nooverride + + comma // print a comma, unless trailcomma overrides it + trailcomma // print a trailing comma unless closed on same line + declcomma // write a comma when not at the end of line + + newline // write a line in a table + formfeed // next line is not part of the table + newsection // add two newlines + + indent // request indent an extra level after the next newline + unindent // unindent a level after the next newline + indented // element was indented. 
+) + +type frame struct { + cg []*ast.CommentGroup + pos int8 + + settings +} + +type settings struct { + // separator is blank if the current node spans a single line and newline + // otherwise. + nodeSep whiteSpace + parentSep whiteSpace + override whiteSpace +} + +// suppress spurious linter warning: field is actually used. +func init() { + s := settings{} + _ = s.override +} + +func (f *formatter) print(a ...interface{}) { + for _, x := range a { + f.Print(x) + switch x.(type) { + case string, token.Token: // , *syntax.BasicLit, *syntax.Ident: + f.current.pos++ + } + } + f.visitComments(f.current.pos) +} + +func (f *formatter) formfeed() whiteSpace { + if f.current.nodeSep == blank { + return blank + } + return formfeed +} + +func (f *formatter) wsOverride(def whiteSpace) whiteSpace { + if f.current.override == ignore { + return def + } + return f.current.override +} + +func (f *formatter) onOneLine(node ast.Node) bool { + a := node.Pos() + b := node.End() + if a.IsValid() && b.IsValid() { + return f.lineFor(a) == f.lineFor(b) + } + // TODO: walk and look at relative positions to determine the same? 
+ return false +} + +func (f *formatter) before(node ast.Node) bool { + f.stack = append(f.stack, f.current) + f.current = frame{settings: f.current.settings} + f.current.parentSep = f.current.nodeSep + + if node != nil { + s, ok := node.(*ast.StructLit) + if ok && len(s.Elts) <= 1 && f.current.nodeSep != blank && f.onOneLine(node) { + f.current.nodeSep = blank + } + f.current.cg = node.Comments() + f.visitComments(f.current.pos) + return true + } + return false +} + +func (f *formatter) after(node ast.Node) { + f.visitComments(127) + p := len(f.stack) - 1 + f.current = f.stack[p] + f.stack = f.stack[:p] + f.current.pos++ + f.visitComments(f.current.pos) +} + +func (f *formatter) visitComments(until int8) { + c := &f.current + + printed := false + for ; len(c.cg) > 0 && c.cg[0].Position <= until; c.cg = c.cg[1:] { + if printed { + f.Print(newsection) + } + printed = true + f.printComment(c.cg[0]) + } +} + +func (f *formatter) printComment(cg *ast.CommentGroup) { + f.Print(cg) + + printBlank := false + if cg.Doc && len(f.output) > 0 { + f.Print(newline) + printBlank = true + } + for _, c := range cg.List { + isEnd := strings.HasPrefix(c.Text, "//") + if !printBlank { + if isEnd { + f.Print(vtab) + } else { + f.Print(blank) + } + } + f.Print(c.Slash) + f.Print(c) + if isEnd { + f.Print(newline) + if cg.Doc { + f.Print(nooverride) + } + } + } +} diff --git a/vendor/cuelang.org/go/cue/format/import.go b/vendor/cuelang.org/go/cue/format/import.go new file mode 100644 index 0000000000..873de2c7f6 --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/import.go @@ -0,0 +1,167 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "sort" + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data +// loss. +func sortImports(d *ast.ImportDecl) { + if !d.Lparen.IsValid() || len(d.Specs) == 0 { + // Not a block: sorted by default. + return + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && (s.Pos().RelPos() >= token.NewSection || hasDoc(s)) { + setRelativePos(s, token.Newline) + // j begins a new run. End this one. + block := sortSpecs(d.Specs[i:j]) + specs = append(specs, block...) + i = j + } + } + specs = append(specs, sortSpecs(d.Specs[i:])...) 
+ setRelativePos(specs[0], token.Newline) + d.Specs = specs +} + +func setRelativePos(s *ast.ImportSpec, r token.RelPos) { + if hasDoc(s) { + return + } + pos := s.Pos().WithRel(r) + if s.Name != nil { + s.Name.NamePos = pos + } else { + s.Path.ValuePos = pos + } +} + +func hasDoc(s *ast.ImportSpec) bool { + for _, doc := range s.Comments() { + if doc.Doc { + return true + } + } + return false +} + +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s *ast.ImportSpec) string { + n := s.Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s *ast.ImportSpec) string { + for _, c := range s.Comments() { + if c.Line { + return c.Text() + } + } + return "" +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next *ast.ImportSpec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + for _, c := range prev.Comments() { + if !c.Doc { + return false + } + } + return true +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(specs []*ast.ImportSpec) []*ast.ImportSpec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + setRelativePos(specs[0], token.NewSection) + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec(specs)) + + // Dedup. 
Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } + } + specs = deduped + + setRelativePos(specs[0], token.NewSection) + return specs +} + +type byImportSpec []*ast.ImportSpec + +func (x byImportSpec) Len() int { return len(x) } +func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x[i]) + jpath := importPath(x[j]) + if ipath != jpath { + return ipath < jpath + } + iname := importName(x[i]) + jname := importName(x[j]) + if iname != jname { + return iname < jname + } + return importComment(x[i]) < importComment(x[j]) +} diff --git a/vendor/cuelang.org/go/cue/format/node.go b/vendor/cuelang.org/go/cue/format/node.go new file mode 100644 index 0000000000..6d06ae4123 --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/node.go @@ -0,0 +1,916 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package format + +import ( + "fmt" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/scanner" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +func printNode(node interface{}, f *printer) error { + s := newFormatter(f) + + ls := labelSimplifier{scope: map[string]bool{}} + + // format node + f.allowed = nooverride // gobble initial whitespace. + switch x := node.(type) { + case *ast.File: + if f.cfg.simplify { + ls.markReferences(x) + } + s.file(x) + case ast.Expr: + if f.cfg.simplify { + ls.markReferences(x) + } + s.expr(x) + case ast.Decl: + if f.cfg.simplify { + ls.markReferences(x) + } + s.decl(x) + // case ast.Node: // TODO: do we need this? + // s.walk(x) + case []ast.Decl: + if f.cfg.simplify { + ls.processDecls(x) + } + s.walkDeclList(x) + default: + goto unsupported + } + + return s.errs + +unsupported: + return fmt.Errorf("cue/format: unsupported node type %T", node) +} + +func isRegularField(tok token.Token) bool { + return tok == token.ILLEGAL || tok == token.COLON +} + +// Helper functions for common node lists. They may be empty. 
+ +func nestDepth(f *ast.Field) int { + d := 1 + if s, ok := f.Value.(*ast.StructLit); ok { + switch { + case len(s.Elts) != 1: + d = 0 + default: + if f, ok := s.Elts[0].(*ast.Field); ok { + d += nestDepth(f) + } + } + } + return d +} + +// TODO: be more accurate and move to astutil +func hasDocComments(d ast.Decl) bool { + if len(d.Comments()) > 0 { + return true + } + switch x := d.(type) { + case *ast.Field: + return len(x.Label.Comments()) > 0 + case *ast.Alias: + return len(x.Ident.Comments()) > 0 + case *ast.LetClause: + return len(x.Ident.Comments()) > 0 + } + return false +} + +func (f *formatter) walkDeclList(list []ast.Decl) { + f.before(nil) + d := 0 + hasEllipsis := false + for i, x := range list { + if i > 0 { + f.print(declcomma) + nd := 0 + if f, ok := x.(*ast.Field); ok { + nd = nestDepth(f) + } + if f.current.parentSep == newline && (d == 0 || nd != d) { + f.print(f.formfeed()) + } + if hasDocComments(x) { + switch x := list[i-1].(type) { + case *ast.Field: + if x.Token == token.ISA || internal.IsDefinition(x.Label) { + f.print(newsection) + } + + default: + f.print(newsection) + } + } + } + if f.printer.cfg.simplify && internal.IsEllipsis(x) { + hasEllipsis = true + continue + } + f.decl(x) + d = 0 + if f, ok := x.(*ast.Field); ok { + d = nestDepth(f) + } + if j := i + 1; j < len(list) { + switch x := list[j].(type) { + case *ast.Field: + switch x := x.Value.(type) { + case *ast.StructLit: + // TODO: not entirely correct: could have multiple elements, + // not have a valid Lbrace, and be marked multiline. This + // cannot occur for ASTs resulting from a parse, though. 
+ if x.Lbrace.IsValid() || len(x.Elts) != 1 { + f.print(f.formfeed()) + continue + } + case *ast.ListLit: + f.print(f.formfeed()) + continue + } + } + } + f.print(f.current.parentSep) + } + if hasEllipsis { + f.decl(&ast.Ellipsis{}) + f.print(f.current.parentSep) + } + f.after(nil) +} + +func (f *formatter) walkSpecList(list []*ast.ImportSpec) { + f.before(nil) + for _, x := range list { + f.before(x) + f.importSpec(x) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkClauseList(list []ast.Clause, ws whiteSpace) { + f.before(nil) + for _, x := range list { + f.before(x) + f.print(ws) + f.clause(x) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkListElems(list []ast.Expr) { + f.before(nil) + for _, x := range list { + f.before(x) + switch n := x.(type) { + case *ast.Comprehension: + f.walkClauseList(n.Clauses, blank) + f.print(blank, nooverride) + f.expr(n.Value) + + case *ast.Ellipsis: + f.ellipsis(n) + + case *ast.Alias: + f.expr(n.Ident) + f.print(n.Equal, token.BIND) + f.expr(n.Expr) + + // TODO: ast.CommentGroup: allows comment groups in ListLits. + + case ast.Expr: + f.exprRaw(n, token.LowestPrec, 1) + } + f.print(comma, blank) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkArgsList(list []ast.Expr, depth int) { + f.before(nil) + for _, x := range list { + f.before(x) + f.exprRaw(x, token.LowestPrec, depth) + f.print(comma, blank) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) file(file *ast.File) { + f.before(file) + f.walkDeclList(file.Decls) + f.after(file) + f.print(token.EOF) +} + +func (f *formatter) inlineField(n *ast.Field) *ast.Field { + regular := internal.IsRegularField(n) + // shortcut single-element structs. + // If the label has a valid position, we assume that an unspecified + // Lbrace signals the intend to collapse fields. 
+ if !n.Label.Pos().IsValid() && !(f.printer.cfg.simplify && regular) { + return nil + } + + obj, ok := n.Value.(*ast.StructLit) + if !ok || len(obj.Elts) != 1 || + (obj.Lbrace.IsValid() && !f.printer.cfg.simplify) || + (obj.Lbrace.IsValid() && hasDocComments(n)) || + len(n.Attrs) > 0 { + return nil + } + + mem, ok := obj.Elts[0].(*ast.Field) + if !ok || len(mem.Attrs) > 0 { + return nil + } + + if hasDocComments(mem) { + // TODO: this inserts curly braces even in spaces where this + // may not be desirable, such as: + // a: + // // foo + // b: 3 + return nil + } + return mem +} + +func (f *formatter) decl(decl ast.Decl) { + if decl == nil { + return + } + defer f.after(decl) + if !f.before(decl) { + return + } + + switch n := decl.(type) { + case *ast.Field: + f.label(n.Label, n.Optional != token.NoPos) + + regular := isRegularField(n.Token) + if regular { + f.print(noblank, nooverride, n.TokenPos, token.COLON) + } else { + f.print(blank, nooverride, n.Token) + } + + if mem := f.inlineField(n); mem != nil { + switch { + default: + fallthrough + + case regular && f.cfg.simplify: + f.print(blank, nooverride) + f.decl(mem) + + case mem.Label.Pos().IsNewline(): + f.print(indent, formfeed) + f.decl(mem) + f.indent-- + } + return + } + + nextFF := f.nextNeedsFormfeed(n.Value) + tab := vtab + if nextFF { + tab = blank + } + + f.print(tab) + + if n.Value != nil { + switch n.Value.(type) { + case *ast.ListLit, *ast.StructLit: + f.expr(n.Value) + default: + f.print(indent) + f.expr(n.Value) + f.markUnindentLine() + } + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + + space := tab + for _, a := range n.Attrs { + if f.before(a) { + f.print(space, a.At, a) + } + f.after(a) + space = blank + } + + if nextFF { + f.print(formfeed) + } + + case *ast.BadDecl: + f.print(n.From, "*bad decl*", declcomma) + + case *ast.Package: + f.print(n.PackagePos, "package") + f.print(blank, n.Name, newsection, nooverride) + + case *ast.ImportDecl: + f.print(n.Import, "import") + 
if len(n.Specs) == 0 { + f.print(blank, n.Lparen, token.LPAREN, n.Rparen, token.RPAREN, newline) + break + } + switch { + case len(n.Specs) == 1 && len(n.Specs[0].Comments()) == 0: + if !n.Lparen.IsValid() { + f.print(blank) + f.walkSpecList(n.Specs) + break + } + fallthrough + default: + f.print(blank, n.Lparen, token.LPAREN, newline, indent) + f.walkSpecList(n.Specs) + f.print(unindent, newline, n.Rparen, token.RPAREN, newline) + } + f.print(newsection, nooverride) + + case *ast.LetClause: + if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.print(n.Let, token.LET, blank, nooverride) + f.expr(n.Ident) + f.print(blank, nooverride, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.print(declcomma) // implied + + case *ast.EmbedDecl: + if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.expr(n.Expr) + f.print(newline, noblank) + + case *ast.Attribute: + f.print(n.At, n) + + case *ast.CommentGroup: + f.printComment(n) + f.print(newsection) + + case ast.Expr: + f.embedding(n) + } +} + +func (f *formatter) embedding(decl ast.Expr) { + switch n := decl.(type) { + case *ast.Comprehension: + if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.walkClauseList(n.Clauses, blank) + f.print(blank, nooverride) + f.expr(n.Value) + + case *ast.Ellipsis: + f.ellipsis(n) + + case *ast.Alias: + if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.expr(n.Ident) + f.print(blank, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.print(declcomma) // implied + + // TODO: ast.CommentGroup: allows comment groups in ListLits. 
+ + case ast.Expr: + f.exprRaw(n, token.LowestPrec, 1) + } +} + +func (f *formatter) nextNeedsFormfeed(n ast.Expr) bool { + switch x := n.(type) { + case *ast.StructLit: + return true + case *ast.BasicLit: + return strings.IndexByte(x.Value, '\n') >= 0 + case *ast.ListLit: + return true + } + return false +} + +func (f *formatter) importSpec(x *ast.ImportSpec) { + if x.Name != nil { + f.label(x.Name, false) + f.print(blank) + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + f.expr(x.Path) + f.print(newline) +} + +func isValidIdent(ident string) bool { + var scan scanner.Scanner + scan.Init(token.NewFile("check", -1, len(ident)), []byte(ident), nil, 0) + + _, tok, lit := scan.Scan() + if tok == token.IDENT || tok.IsKeyword() { + return lit == ident + } + return false +} + +func (f *formatter) label(l ast.Label, optional bool) { + f.before(l) + defer f.after(l) + switch n := l.(type) { + case *ast.Alias: + f.expr(n) + + case *ast.Ident: + // Escape an identifier that has invalid characters. This may happen, + // if the AST is not generated by the parser. + name := n.Name + if !ast.IsValidIdent(name) { + name = literal.String.Quote(n.Name) + } + f.print(n.NamePos, name) + + case *ast.BasicLit: + str := n.Value + // Allow any CUE string in the AST, but ensure it is formatted + // according to spec. 
+ if strings.HasPrefix(str, `"""`) || strings.HasPrefix(str, "#") { + if u, err := literal.Unquote(str); err == nil { + str = literal.String.Quote(u) + } + } + f.print(n.ValuePos, str) + + case *ast.ListLit: + f.expr(n) + + case *ast.ParenExpr: + f.expr(n) + + case *ast.Interpolation: + f.expr(n) + + default: + panic(fmt.Sprintf("unknown label type %T", n)) + } + if optional { + f.print(token.OPTION) + } +} + +func (f *formatter) ellipsis(x *ast.Ellipsis) { + f.print(x.Ellipsis, token.ELLIPSIS) + if x.Type != nil && !isTop(x.Type) { + f.expr(x.Type) + } +} + +func (f *formatter) expr(x ast.Expr) { + const depth = 1 + f.expr1(x, token.LowestPrec, depth) +} + +func (f *formatter) expr0(x ast.Expr, depth int) { + f.expr1(x, token.LowestPrec, depth) +} + +func (f *formatter) expr1(expr ast.Expr, prec1, depth int) { + if f.before(expr) { + f.exprRaw(expr, prec1, depth) + } + f.after(expr) +} + +func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { + + switch x := expr.(type) { + case *ast.BadExpr: + f.print(x.From, "_|_") + + case *ast.BottomLit: + f.print(x.Bottom, token.BOTTOM) + + case *ast.Alias: + // Aliases in expression positions are printed in short form. 
+ f.label(x.Ident, false) + f.print(x.Equal, token.BIND) + f.expr(x.Expr) + + case *ast.Ident: + f.print(x.NamePos, x) + + case *ast.BinaryExpr: + if depth < 1 { + f.internalError("depth < 1:", depth) + depth = 1 + } + f.binaryExpr(x, prec1, cutoff(x, depth), depth) + + case *ast.UnaryExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + f.print(token.LPAREN, nooverride) + f.expr(x) + f.print(token.RPAREN) + } else { + // no parenthesis needed + f.print(x.OpPos, x.Op, nooverride) + f.expr1(x.X, prec, depth) + } + + case *ast.BasicLit: + f.print(x.ValuePos, x) + + case *ast.Interpolation: + f.before(nil) + for _, x := range x.Elts { + f.expr0(x, depth+1) + } + f.after(nil) + + case *ast.ParenExpr: + if _, hasParens := x.X.(*ast.ParenExpr); hasParens { + // don't print parentheses around an already parenthesized expression + // TODO: consider making this more general and incorporate precedence levels + f.expr0(x.X, depth) + } else { + f.print(x.Lparen, token.LPAREN) + f.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth + f.print(x.Rparen, token.RPAREN) + } + + case *ast.SelectorExpr: + f.selectorExpr(x, depth) + + case *ast.IndexExpr: + f.expr1(x.X, token.HighestPrec, 1) + f.print(x.Lbrack, token.LBRACK) + f.expr0(x.Index, depth+1) + f.print(x.Rbrack, token.RBRACK) + + case *ast.SliceExpr: + f.expr1(x.X, token.HighestPrec, 1) + f.print(x.Lbrack, token.LBRACK) + indices := []ast.Expr{x.Low, x.High} + for i, y := range indices { + if i > 0 { + // blanks around ":" if both sides exist and either side is a binary expression + x := indices[i-1] + if depth <= 1 && x != nil && y != nil && (isBinary(x) || isBinary(y)) { + f.print(blank, token.COLON, blank) + } else { + f.print(token.COLON) + } + } + if y != nil { + f.expr0(y, depth+1) + } + } + f.print(x.Rbrack, token.RBRACK) + + case *ast.CallExpr: + if len(x.Args) > 1 { + depth++ + } + wasIndented := f.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + f.print(x.Lparen, 
token.LPAREN) + f.walkArgsList(x.Args, depth) + f.print(trailcomma, noblank, x.Rparen, token.RPAREN) + if wasIndented { + f.print(unindent) + } + + case *ast.StructLit: + var l line + ws := noblank + ff := f.formfeed() + + switch { + case len(x.Elts) == 0: + if !x.Rbrace.HasRelPos() { + // collapse curly braces if the body is empty. + ffAlt := blank | nooverride + for _, c := range x.Comments() { + if c.Position == 1 { + ffAlt = ff + } + } + ff = ffAlt + } + case !x.Rbrace.HasRelPos() || !x.Elts[0].Pos().HasRelPos(): + ws |= newline | nooverride + } + f.print(x.Lbrace, token.LBRACE, &l, ws, ff, indent) + + f.walkDeclList(x.Elts) + f.matchUnindent() + + ws = noblank + if f.lineout != l { + ws |= newline + if f.lastTok != token.RBRACE && f.lastTok != token.RBRACK { + ws |= nooverride + } + } + f.print(ws, x.Rbrace, token.RBRACE) + + case *ast.ListLit: + f.print(x.Lbrack, token.LBRACK, indent) + f.walkListElems(x.Elts) + f.print(trailcomma, noblank) + f.visitComments(f.current.pos) + f.matchUnindent() + f.print(noblank, x.Rbrack, token.RBRACK) + + case *ast.Ellipsis: + f.ellipsis(x) + + default: + panic(fmt.Sprintf("unimplemented type %T", x)) + } +} + +func (f *formatter) clause(clause ast.Clause) { + switch n := clause.(type) { + case *ast.ForClause: + f.print(n.For, "for", blank) + f.print(indent) + if n.Key != nil { + f.label(n.Key, false) + f.print(n.Colon, token.COMMA, blank) + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + f.label(n.Value, false) + f.print(blank, n.In, "in", blank) + f.expr(n.Source) + f.markUnindentLine() + + case *ast.IfClause: + f.print(n.If, "if", blank) + f.print(indent) + f.expr(n.Condition) + f.markUnindentLine() + + case *ast.LetClause: + f.print(n.Let, token.LET, blank, nooverride) + f.print(indent) + f.expr(n.Ident) + f.print(blank, nooverride, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.markUnindentLine() + + default: + panic("unknown clause type") + } +} + +func walkBinary(e *ast.BinaryExpr) (has6, has7, has8 
bool, maxProblem int) { + switch e.Op.Precedence() { + case 6: + has6 = true + case 7: + has7 = true + case 8: + has8 = true + } + + switch l := e.X.(type) { + case *ast.BinaryExpr: + if l.Op.Precedence() < e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *syntax.ParenExpr and do nothing. + break + } + h6, h7, h8, mp := walkBinary(l) + has6 = has6 || h6 + has7 = has7 || h7 + has8 = has8 || h8 + if maxProblem < mp { + maxProblem = mp + } + } + + switch r := e.Y.(type) { + case *ast.BinaryExpr: + if r.Op.Precedence() <= e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *syntax.ParenExpr and do nothing. + break + } + h6, h7, h8, mp := walkBinary(r) + has6 = has6 || h6 + has7 = has7 || h7 + has8 = has8 || h8 + if maxProblem < mp { + maxProblem = mp + } + + case *ast.UnaryExpr: + switch e.Op.String() + r.Op.String() { + case "/*": + maxProblem = 8 + case "++", "--": + if maxProblem < 6 { + maxProblem = 6 + } + } + } + return +} + +func cutoff(e *ast.BinaryExpr, depth int) int { + has6, has7, has8, maxProblem := walkBinary(e) + if maxProblem > 0 { + return maxProblem + 1 + } + if (has6 || has7) && has8 { + if depth == 1 { + return 8 + } + if has7 { + return 7 + } + return 6 + } + if has6 && has7 { + if depth == 1 { + return 7 + } + return 6 + } + if depth == 1 { + return 8 + } + return 6 +} + +func diffPrec(expr ast.Expr, prec int) int { + x, ok := expr.(*ast.BinaryExpr) + if !ok || prec != x.Op.Precedence() { + return 1 + } + return 0 +} + +func reduceDepth(depth int) int { + depth-- + if depth < 1 { + depth = 1 + } + return depth +} + +// Format the binary expression: decide the cutoff and then format. +// Let's call depth == 1 Normal mode, and depth > 1 Compact mode. +// (Algorithm suggestion by Russ Cox.) +// +// The precedences are: +// 7 * / % quo rem div mod +// 6 + - +// 5 == != < <= > >= +// 4 && +// 3 || +// 2 & +// 1 | +// +// The only decision is whether there will be spaces around levels 6 and 7. 
+// There are never spaces at level 8 (unary), and always spaces at levels 5 and below. +// +// To choose the cutoff, look at the whole expression but excluding primary +// expressions (function calls, parenthesized exprs), and apply these rules: +// +// 1) If there is a binary operator with a right side unary operand +// that would clash without a space, the cutoff must be (in order): +// +// /* 8 +// ++ 7 // not necessary, but to avoid confusion +// -- 7 +// +// (Comparison operators always have spaces around them.) +// +// 2) If there is a mix of level 7 and level 6 operators, then the cutoff +// is 7 (use spaces to distinguish precedence) in Normal mode +// and 6 (never use spaces) in Compact mode. +// +// 3) If there are no level 6 operators or no level 7 operators, then the +// cutoff is 8 (always use spaces) in Normal mode +// and 6 (never use spaces) in Compact mode. +// +func (f *formatter) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { + f.nestExpr++ + defer func() { f.nestExpr-- }() + + prec := x.Op.Precedence() + if prec < prec1 { + // parenthesis needed + // Note: The parser inserts an syntax.ParenExpr node; thus this case + // can only occur if the AST is created in a different way. 
+ // defer p.pushComment(nil).pop() + f.print(token.LPAREN, nooverride) + f.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth + f.print(token.RPAREN) + return + } + + printBlank := prec < cutoff + + f.expr1(x.X, prec, depth+diffPrec(x.X, prec)) + f.print(nooverride) + if printBlank { + f.print(blank) + } + f.print(x.OpPos, x.Op) + if x.Y.Pos().IsNewline() { + // at least one line break, but respect an extra empty line + // in the source + f.print(formfeed) + printBlank = false // no blank after line break + } else { + f.print(nooverride) + } + if printBlank { + f.print(blank) + } + f.expr1(x.Y, prec+1, depth+1) +} + +func isBinary(expr ast.Expr) bool { + _, ok := expr.(*ast.BinaryExpr) + return ok +} + +func (f *formatter) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { + if x, ok := expr.(*ast.SelectorExpr); ok { + return f.selectorExpr(x, depth) + } + f.expr1(expr, prec1, depth) + return false +} + +// selectorExpr handles an *syntax.SelectorExpr node and returns whether x spans +// multiple lines. +func (f *formatter) selectorExpr(x *ast.SelectorExpr, depth int) bool { + f.expr1(x.X, token.HighestPrec, depth) + f.print(token.PERIOD) + if x.Sel.Pos().IsNewline() { + f.print(indent, formfeed) + f.expr(x.Sel.(ast.Expr)) + f.print(unindent) + return true + } + f.print(noblank) + f.expr(x.Sel.(ast.Expr)) + return false +} + +func isTop(e ast.Expr) bool { + ident, ok := e.(*ast.Ident) + return ok && ident.Name == "_" +} diff --git a/vendor/cuelang.org/go/cue/format/printer.go b/vendor/cuelang.org/go/cue/format/printer.go new file mode 100644 index 0000000000..a43154fa6c --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/printer.go @@ -0,0 +1,424 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "fmt" + "os" + "strings" + "text/tabwriter" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +// A printer takes the stream of formatting tokens and spacing directives +// produced by the formatter and adjusts the spacing based on the original +// source code. +type printer struct { + cfg *config + + allowed whiteSpace + requested whiteSpace + indentStack []whiteSpace + + pos token.Position // current pos in AST + lineout line + + lastTok token.Token // last token printed (syntax.ILLEGAL if it's whitespace) + + output []byte + indent int + spaceBefore bool + + errs errors.Error +} + +type line int + +func (p *printer) init(cfg *config) { + p.cfg = cfg + p.pos = token.Position{Line: 1, Column: 1} +} + +func (p *printer) errf(n ast.Node, format string, args ...interface{}) { + p.errs = errors.Append(p.errs, errors.Newf(n.Pos(), format, args...)) +} + +const debug = false + +func (p *printer) internalError(msg ...interface{}) { + if debug { + fmt.Print(p.pos.String() + ": ") + fmt.Println(msg...) 
+ panic("go/printer") + } +} + +func (p *printer) lineFor(pos token.Pos) int { + return pos.Line() +} + +func (p *printer) Print(v interface{}) { + var ( + impliedComma = false + isLit bool + data string + nextWS whiteSpace + ) + switch x := v.(type) { + case *line: + *x = p.lineout + + case token.Token: + s := x.String() + before, after := mayCombine(p.lastTok, x) + if before && !p.spaceBefore { + // the previous and the current token must be + // separated by a blank otherwise they combine + // into a different incorrect token sequence + // (except for syntax.INT followed by a '.' this + // should never happen because it is taken care + // of via binary expression formatting) + if p.allowed&blank != 0 { + p.internalError("whitespace buffer not empty") + } + p.allowed |= blank + } + if after { + nextWS = blank + } + data = s + switch x { + case token.EOF: + data = "" + p.allowed = newline + p.allowed &^= newsection + case token.LPAREN, token.LBRACK, token.LBRACE: + case token.RPAREN, token.RBRACK, token.RBRACE: + impliedComma = true + } + p.lastTok = x + + case *ast.BasicLit: + data = x.Value + switch x.Kind { + case token.STRING: + // TODO: only do this when simplifying. Right now this does not + // give the right result, but it should be better if: + // 1) simplification is done as a separate step + // 2) simplified structs are explicitly referenced separately + // in the AST. + if p.indent < 6 { + data = literal.IndentTabs(data, p.cfg.Indent+p.indent+1) + } + + case token.INT: + if len(data) > 1 && + data[0] == '0' && + data[1] >= '0' && data[1] <= '9' { + data = "0o" + data[1:] + } + // Pad trailing dot before multiplier. + if p := strings.IndexByte(data, '.'); p >= 0 && data[p+1] > '9' { + data = data[:p+1] + "0" + data[p+1:] + } + // Lowercase E, but only if it is not the last character: in the + // future we may use E for Exa. 
+ if p := strings.IndexByte(data, 'E'); p != -1 && p < len(data)-1 { + data = strings.ToLower(data) + } + + case token.FLOAT: + // Pad leading or trailing dots. + switch p := strings.IndexByte(data, '.'); { + case p < 0: + case p == 0: + data = "0" + data + case p == len(data)-1: + data += "0" + case data[p+1] > '9': + data = data[:p+1] + "0" + data[p+1:] + } + if strings.IndexByte(data, 'E') != -1 { + data = strings.ToLower(data) + } + } + + isLit = true + impliedComma = true + p.lastTok = x.Kind + + case *ast.Ident: + data = x.Name + if !ast.IsValidIdent(data) { + p.errf(x, "invalid identifier %q", x.Name) + data = "*bad identifier*" + } + impliedComma = true + p.lastTok = token.IDENT + + case string: + data = x + impliedComma = true + p.lastTok = token.STRING + + case *ast.CommentGroup: + rel := x.Pos().RelPos() + if x.Line { // TODO: we probably don't need this. + rel = token.Blank + } + switch rel { + case token.NoRelPos: + case token.Newline, token.NewSection: + case token.Blank, token.Elided: + p.allowed |= blank + fallthrough + case token.NoSpace: + p.allowed &^= newline | newsection | formfeed | declcomma + } + return + + case *ast.Attribute: + data = x.Text + impliedComma = true + p.lastTok = token.ATTRIBUTE + + case *ast.Comment: + // TODO: if implied comma, postpone comment + data = x.Text + p.lastTok = token.COMMENT + + case whiteSpace: + p.allowed |= x + return + + case token.Pos: + // TODO: should we use a known file position to synchronize? Go does, + // but we don't really have to. 
+ // pos := x + if x.HasRelPos() { + if p.allowed&nooverride == 0 { + requested := p.allowed + switch x.RelPos() { + case token.NoSpace: + requested &^= newline | newsection | formfeed + case token.Blank: + requested |= blank + requested &^= newline | newsection | formfeed + case token.Newline: + requested |= newline + case token.NewSection: + requested |= newsection + } + p.writeWhitespace(requested) + p.allowed = 0 + p.requested = 0 + } + // p.pos = pos + } + return + + default: + fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", x, x) + panic("go/printer type") + } + + p.writeWhitespace(p.allowed) + p.allowed = 0 + p.requested = 0 + p.writeString(data, isLit) + p.allowed = nextWS + _ = impliedComma // TODO: delay comment printings +} + +func (p *printer) writeWhitespace(ws whiteSpace) { + if ws&comma != 0 { + switch { + case ws&(newsection|newline|formfeed) != 0, + ws&trailcomma == 0: + p.writeByte(',', 1) + } + } + if ws&indent != 0 { + p.markLineIndent(ws) + } + if ws&unindent != 0 { + p.markUnindentLine() + } + switch { + case ws&newsection != 0: + p.maybeIndentLine(ws) + p.writeByte('\f', 2) + p.lineout += 2 + p.spaceBefore = true + case ws&formfeed != 0: + p.maybeIndentLine(ws) + p.writeByte('\f', 1) + p.lineout++ + p.spaceBefore = true + case ws&newline != 0: + p.maybeIndentLine(ws) + p.writeByte('\n', 1) + p.lineout++ + p.spaceBefore = true + case ws&declcomma != 0: + p.writeByte(',', 1) + p.writeByte(' ', 1) + p.spaceBefore = true + case ws&noblank != 0: + case ws&vtab != 0: + p.writeByte('\v', 1) + p.spaceBefore = true + case ws&blank != 0: + p.writeByte(' ', 1) + p.spaceBefore = true + } +} + +func (p *printer) markLineIndent(ws whiteSpace) { + p.indentStack = append(p.indentStack, ws) +} + +func (p *printer) markUnindentLine() (wasUnindented bool) { + last := len(p.indentStack) - 1 + if ws := p.indentStack[last]; ws&indented != 0 { + p.indent-- + wasUnindented = true + } + p.indentStack = p.indentStack[:last] + return wasUnindented +} + 
+func (p *printer) maybeIndentLine(ws whiteSpace) { + if ws&unindent == 0 && len(p.indentStack) > 0 { + last := len(p.indentStack) - 1 + if ws := p.indentStack[last]; ws&indented != 0 || ws&indent == 0 { + return + } + p.indentStack[last] |= indented + p.indent++ + } +} + +func (f *formatter) matchUnindent() whiteSpace { + f.allowed |= unindent + // TODO: make this work. Whitespace from closing bracket should match that + // of opening if there is no position information. + // f.allowed &^= nooverride | newline | newsection | formfeed | blank | noblank + // ws := f.indentStack[len(f.indentStack)-1] + // mask := blank | noblank | vtab + // f.allowed |= unindent | blank | noblank + // if ws&newline != 0 || ws*indented != 0 { + // f.allowed |= newline + // } + return 0 +} + +// writeString writes the string s to p.output and updates p.pos, p.out, +// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters +// to protect s from being interpreted by the tabwriter. +// +// Note: writeString is only used to write Go tokens, literals, and +// comments, all of which must be written literally. Thus, it is correct +// to always set isLit = true. However, setting it explicitly only when +// needed (i.e., when we don't know that s contains no tabs or line breaks) +// avoids processing extra escape characters and reduces run time of the +// printer benchmark by up to 10%. +// +func (p *printer) writeString(s string, isLit bool) { + if s != "" { + p.spaceBefore = false + } + + if isLit { + // Protect s such that is passes through the tabwriter + // unchanged. Note that valid Go programs cannot contain + // tabwriter.Escape bytes since they do not appear in legal + // UTF-8 sequences. + p.output = append(p.output, tabwriter.Escape) + } + + p.output = append(p.output, s...) 
+ + if isLit { + p.output = append(p.output, tabwriter.Escape) + } + // update positions + nLines := 0 + var li int // index of last newline; valid if nLines > 0 + for i := 0; i < len(s); i++ { + // CUE tokens cannot contain '\f' - no need to look for it + if s[i] == '\n' { + nLines++ + li = i + } + } + p.pos.Offset += len(s) + if nLines > 0 { + p.pos.Line += nLines + c := len(s) - li + p.pos.Column = c + } else { + p.pos.Column += len(s) + } +} + +func (p *printer) writeByte(ch byte, n int) { + for i := 0; i < n; i++ { + p.output = append(p.output, ch) + } + + // update positions + p.pos.Offset += n + if ch == '\n' || ch == '\f' { + p.pos.Line += n + p.pos.Column = 1 + + n := p.cfg.Indent + p.indent // include base indentation + for i := 0; i < n; i++ { + p.output = append(p.output, '\t') + } + + // update positions + p.pos.Offset += n + p.pos.Column += n + + return + } + p.pos.Column += n +} + +func mayCombine(prev, next token.Token) (before, after bool) { + s := next.String() + if 'a' <= s[0] && s[0] < 'z' { + return true, true + } + switch prev { + case token.IQUO, token.IREM, token.IDIV, token.IMOD: + return false, false + case token.INT: + before = next == token.PERIOD // 1. + case token.ADD: + before = s[0] == '+' // ++ + case token.SUB: + before = s[0] == '-' // -- + case token.QUO: + before = s[0] == '*' // /* + } + return before, false +} diff --git a/vendor/cuelang.org/go/cue/format/simplify.go b/vendor/cuelang.org/go/cue/format/simplify.go new file mode 100644 index 0000000000..f4981978cc --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/simplify.go @@ -0,0 +1,113 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/internal" +) + +// labelSimplifier rewrites string labels to identifiers if +// no identifiers will subsequently bind to the exposed label. +// In other words, string labels are only replaced if this does +// not change the semantics of the CUE code. +type labelSimplifier struct { + parent *labelSimplifier + scope map[string]bool +} + +func (s *labelSimplifier) processDecls(decls []ast.Decl) { + sc := labelSimplifier{parent: s, scope: map[string]bool{}} + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + ast.Walk(x.Label, sc.markStrings, nil) + } + } + + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + ast.Walk(x.Value, sc.markReferences, nil) + default: + ast.Walk(x, sc.markReferences, nil) + } + } + + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + x.Label = astutil.Apply(x.Label, sc.replace, nil).(ast.Label) + } + } +} + +func (s *labelSimplifier) markReferences(n ast.Node) bool { + // Record strings at this level. 
+ switch x := n.(type) { + case *ast.File: + s.processDecls(x.Decls) + return false + + case *ast.StructLit: + s.processDecls(x.Elts) + return false + + case *ast.SelectorExpr: + ast.Walk(x.X, s.markReferences, nil) + return false + + case *ast.Ident: + for c := s; c != nil; c = c.parent { + if _, ok := c.scope[x.Name]; ok { + c.scope[x.Name] = false + break + } + } + } + return true +} + +func (s *labelSimplifier) markStrings(n ast.Node) bool { + switch x := n.(type) { + case *ast.BasicLit: + str, err := strconv.Unquote(x.Value) + if err != nil || !ast.IsValidIdent(str) || internal.IsDefOrHidden(str) { + return false + } + s.scope[str] = true + + case *ast.Ident: + s.scope[x.Name] = true + + case *ast.ListLit, *ast.Interpolation: + return false + } + return true +} + +func (s *labelSimplifier) replace(c astutil.Cursor) bool { + switch x := c.Node().(type) { + case *ast.BasicLit: + str, err := strconv.Unquote(x.Value) + if err == nil && s.scope[str] && !internal.IsDefOrHidden(str) { + c.Replace(ast.NewIdent(str)) + } + } + return true +} diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go new file mode 100644 index 0000000000..0fe93c49fd --- /dev/null +++ b/vendor/cuelang.org/go/cue/instance.go @@ -0,0 +1,357 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cue + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/internal" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/compile" + "cuelang.org/go/internal/core/runtime" +) + +// An InstanceOrValue is implemented by Value and *Instance. +// +// This is a placeholder type that is used to allow Instance-based APIs to +// transition to Value-based APIs. The goals is to get rid of the Instance +// type before v1.0.0. +type InstanceOrValue interface { + Value() Value + + internal() +} + +func (Value) internal() {} +func (*Instance) internal() {} + +// Value implements value.Instance. +func (v hiddenValue) Value() Value { return v } + +// An Instance defines a single configuration based on a collection of +// underlying CUE files. +type Instance struct { + index *runtime.Runtime + + root *adt.Vertex + + ImportPath string + Dir string + PkgName string + DisplayName string + + Incomplete bool // true if Pkg and all its dependencies are free of errors + Err errors.Error // non-nil if the package had errors + + inst *build.Instance +} + +type hiddenInstance = Instance + +func addInst(x *runtime.Runtime, p *Instance) *Instance { + if p.inst == nil { + p.inst = &build.Instance{ + ImportPath: p.ImportPath, + PkgName: p.PkgName, + } + } + x.AddInst(p.ImportPath, p.root, p.inst) + x.SetBuildData(p.inst, p) + p.index = x + return p +} + +func lookupInstance(x *runtime.Runtime, p *build.Instance) *Instance { + if x, ok := x.BuildData(p); ok { + return x.(*Instance) + } + return nil +} + +func getImportFromBuild(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance { + inst := lookupInstance(x, p) + + if inst != nil { + return inst + } + + inst = &Instance{ + ImportPath: p.ImportPath, + Dir: p.Dir, + PkgName: p.PkgName, + DisplayName: p.ImportPath, + root: v, + inst: p, + index: x, + } + if p.Err != nil { + inst.setListOrError(p.Err) + } + + x.SetBuildData(p, inst) + + return inst +} 
+ +func getImportFromNode(x *runtime.Runtime, v *adt.Vertex) *Instance { + p := x.GetInstanceFromNode(v) + if p == nil { + return nil + } + + return getImportFromBuild(x, p, v) +} + +func getImportFromPath(x *runtime.Runtime, id string) *Instance { + node := x.LoadImport(id) + if node == nil { + return nil + } + b := x.GetInstanceFromNode(node) + inst := lookupInstance(x, b) + if inst == nil { + inst = &Instance{ + ImportPath: b.ImportPath, + PkgName: b.PkgName, + root: node, + inst: b, + index: x, + } + } + return inst +} + +func init() { + internal.MakeInstance = func(value interface{}) interface{} { + v := value.(Value) + x := v.eval(v.ctx()) + st, ok := x.(*adt.Vertex) + if !ok { + st = &adt.Vertex{} + st.AddConjunct(adt.MakeRootConjunct(nil, x)) + } + return addInst(v.idx, &Instance{ + root: st, + }) + } +} + +// newInstance creates a new instance. Use Insert to populate the instance. +func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance { + // TODO: associate root source with structLit. + inst := &Instance{ + root: v, + inst: p, + } + if p != nil { + inst.ImportPath = p.ImportPath + inst.Dir = p.Dir + inst.PkgName = p.PkgName + inst.DisplayName = p.ImportPath + if p.Err != nil { + inst.setListOrError(p.Err) + } + } + + x.AddInst(p.ImportPath, v, p) + x.SetBuildData(p, inst) + inst.index = x + return inst +} + +func (inst *Instance) setListOrError(err errors.Error) { + inst.Incomplete = true + inst.Err = errors.Append(inst.Err, err) +} + +func (inst *Instance) setError(err errors.Error) { + inst.Incomplete = true + inst.Err = errors.Append(inst.Err, err) +} + +func (inst *Instance) eval(ctx *adt.OpContext) adt.Value { + // TODO: remove manifest here? + v := manifest(ctx, inst.root) + return v +} + +// ID returns the package identifier that uniquely qualifies module and +// package name. 
+func (inst *Instance) ID() string { + if inst == nil || inst.inst == nil { + return "" + } + return inst.inst.ID() +} + +// Doc returns the package comments for this instance. +// +// Deprecated: use inst.Value().Doc() +func (inst *hiddenInstance) Doc() []*ast.CommentGroup { + return inst.Value().Doc() +} + +// Value returns the root value of the configuration. If the configuration +// defines in emit value, it will be that value. Otherwise it will be all +// top-level values. +func (inst *Instance) Value() Value { + ctx := newContext(inst.index) + inst.root.Finalize(ctx) + return newVertexRoot(inst.index, ctx, inst.root) +} + +// Eval evaluates an expression within an existing instance. +// +// Expressions may refer to builtin packages if they can be uniquely identified. +// +// Deprecated: use +// inst.Value().Context().BuildExpr(expr, Scope(inst.Value), InferBuiltins(true)) +func (inst *hiddenInstance) Eval(expr ast.Expr) Value { + v := inst.Value() + return v.Context().BuildExpr(expr, Scope(v), InferBuiltins(true)) +} + +// DO NOT USE. +// +// Deprecated: do not use. +func Merge(inst ...*Instance) *Instance { + v := &adt.Vertex{} + + i := inst[0] + ctx := newContext(i.index) + + // TODO: interesting test: use actual unification and then on K8s corpus. + + for _, i := range inst { + w := i.Value() + v.AddConjunct(adt.MakeRootConjunct(nil, w.v.ToDataAll())) + } + v.Finalize(ctx) + + p := addInst(i.index, &Instance{ + root: v, + }) + return p +} + +// Build creates a new instance from the build instances, allowing unbound +// identifier to bind to the top-level field in inst. The top-level fields in +// inst take precedence over predeclared identifier and builtin functions. 
+// +// Deprecated: use Context.Build +func (inst *hiddenInstance) Build(p *build.Instance) *Instance { + p.Complete() + + idx := inst.index + r := inst.index + + rErr := r.ResolveFiles(p) + + cfg := &compile.Config{Scope: valueScope(Value{idx: r, v: inst.root})} + v, err := compile.Files(cfg, r, p.ID(), p.Files...) + + v.AddConjunct(adt.MakeRootConjunct(nil, inst.root)) + + i := newInstance(idx, p, v) + if rErr != nil { + i.setListOrError(rErr) + } + if i.Err != nil { + i.setListOrError(i.Err) + } + + if err != nil { + i.setListOrError(err) + } + + return i +} + +func (inst *Instance) value() Value { + return newVertexRoot(inst.index, newContext(inst.index), inst.root) +} + +// Lookup reports the value at a path starting from the top level struct. The +// Exists method of the returned value will report false if the path did not +// exist. The Err method reports if any error occurred during evaluation. The +// empty path returns the top-level configuration struct. Use LookupDef for definitions or LookupField for +// any kind of field. +// +// Deprecated: use Value.LookupPath +func (inst *hiddenInstance) Lookup(path ...string) Value { + return inst.value().Lookup(path...) +} + +// LookupDef reports the definition with the given name within struct v. The +// Exists method of the returned value will report false if the definition did +// not exist. The Err method reports if any error occurred during evaluation. +// +// Deprecated: use Value.LookupPath +func (inst *hiddenInstance) LookupDef(path string) Value { + return inst.value().LookupDef(path) +} + +// LookupField reports a Field at a path starting from v, or an error if the +// path is not. The empty path returns v itself. +// +// It cannot look up hidden or unexported fields. +// +// Deprecated: this API does not work with new-style definitions. Use +// FieldByName defined on inst.Value(). 
+// +// Deprecated: use Value.LookupPath +func (inst *hiddenInstance) LookupField(path ...string) (f FieldInfo, err error) { + v := inst.value() + for _, k := range path { + s, err := v.Struct() + if err != nil { + return f, err + } + + f, err = s.FieldByName(k, true) + if err != nil { + return f, err + } + if f.IsHidden { + return f, errNotFound + } + v = f.Value + } + return f, err +} + +// Fill creates a new instance with the values of the old instance unified with +// the given value. It is not possible to update the emit value. +// +// Values may be any Go value that can be converted to CUE, an ast.Expr or +// a Value. In the latter case, it will panic if the Value is not from the same +// Runtime. +// +// Deprecated: use Value.FillPath() +func (inst *hiddenInstance) Fill(x interface{}, path ...string) (*Instance, error) { + v := inst.Value().Fill(x, path...) + + inst = addInst(inst.index, &Instance{ + root: v.v, + inst: nil, + + // Omit ImportPath to indicate this is not an importable package. + Dir: inst.Dir, + PkgName: inst.PkgName, + Incomplete: inst.Incomplete, + }) + return inst, nil +} diff --git a/vendor/cuelang.org/go/cue/literal/doc.go b/vendor/cuelang.org/go/cue/literal/doc.go new file mode 100644 index 0000000000..3d3095c6ce --- /dev/null +++ b/vendor/cuelang.org/go/cue/literal/doc.go @@ -0,0 +1,17 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package literal implements conversions to and from string representations of +// basic data types. +package literal diff --git a/vendor/cuelang.org/go/cue/literal/indent.go b/vendor/cuelang.org/go/cue/literal/indent.go new file mode 100644 index 0000000000..193ca3b440 --- /dev/null +++ b/vendor/cuelang.org/go/cue/literal/indent.go @@ -0,0 +1,33 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package literal + +import "strings" + +// IndentTabs takes a quoted string and reindents it for the given indentation. +// If a string is not a multiline string it will return the string as is. +func IndentTabs(s string, n int) string { + indent := tabs(n) + + qi, _, _, err := ParseQuotes(s, s) + if err != nil || !qi.multiline || qi.whitespace == indent { + return s + } + + search := "\n" + qi.whitespace + replace := "\n" + indent + + return strings.ReplaceAll(s, search, replace) +} diff --git a/vendor/cuelang.org/go/cue/literal/num.go b/vendor/cuelang.org/go/cue/literal/num.go new file mode 100644 index 0000000000..bb77d5b2f2 --- /dev/null +++ b/vendor/cuelang.org/go/cue/literal/num.go @@ -0,0 +1,357 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package literal + +import ( + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "github.com/cockroachdb/apd/v2" +) + +var baseContext apd.Context + +func init() { + baseContext = apd.BaseContext + baseContext.Precision = 24 +} + +// NumInfo contains information about a parsed numbers. +// +// Reusing a NumInfo across parses may avoid memory allocations. +type NumInfo struct { + pos token.Pos + src string + p int + ch byte + buf []byte + + mul Multiplier + base byte + neg bool + UseSep bool + isFloat bool + err error +} + +// String returns a canonical string representation of the number so that +// it can be parsed with math.Float.Parse. +func (p *NumInfo) String() string { + if len(p.buf) > 0 && p.base == 10 && p.mul == 0 { + return string(p.buf) + } + var d apd.Decimal + _ = p.decimal(&d) + return d.String() +} + +type decimal = apd.Decimal + +// Decimal is for internal use. +func (p *NumInfo) Decimal(v *decimal) error { + return p.decimal(v) +} + +func (p *NumInfo) decimal(v *apd.Decimal) error { + if p.base != 10 { + _, _, _ = v.SetString("0") + b := p.buf + if p.buf[0] == '-' { + v.Negative = p.neg + b = p.buf[1:] + } + v.Coeff.SetString(string(b), int(p.base)) + return nil + } + _ = v.UnmarshalText(p.buf) + if p.mul != 0 { + _, _ = baseContext.Mul(v, v, mulToRat[p.mul]) + cond, _ := baseContext.RoundToIntegralExact(v, v) + if cond.Inexact() { + return p.errorf("number cannot be represented as int") + } + } + return nil +} + +// Multiplier reports which multiplier was used in an integral number. 
+func (p *NumInfo) Multiplier() Multiplier { + return p.mul +} + +// IsInt reports whether the number is an integral number. +func (p *NumInfo) IsInt() bool { + return !p.isFloat +} + +// ParseNum parses s and populates NumInfo with the result. +func ParseNum(s string, n *NumInfo) error { + *n = NumInfo{pos: n.pos, src: s, buf: n.buf[:0]} + if !n.next() { + return n.errorf("invalid number %q", s) + } + if n.ch == '-' { + n.neg = true + n.buf = append(n.buf, '-') + n.next() + } + seenDecimalPoint := false + if n.ch == '.' { + n.next() + seenDecimalPoint = true + } + err := n.scanNumber(seenDecimalPoint) + if err != nil { + return err + } + if n.err != nil { + return n.err + } + if n.p < len(n.src) { + return n.errorf("invalid number %q", s) + } + if len(n.buf) == 0 { + n.buf = append(n.buf, '0') + } + return nil +} + +func (p *NumInfo) errorf(format string, args ...interface{}) error { + return errors.Newf(p.pos, format, args...) +} + +// A Multiplier indicates a multiplier indicator used in the literal. +type Multiplier byte + +const ( + mul1 Multiplier = 1 + iota + mul2 + mul3 + mul4 + mul5 + mul6 + mul7 + mul8 + + mulBin = 0x10 + mulDec = 0x20 + + K = mulDec | mul1 + M = mulDec | mul2 + G = mulDec | mul3 + T = mulDec | mul4 + P = mulDec | mul5 + E = mulDec | mul6 + Z = mulDec | mul7 + Y = mulDec | mul8 + + Ki = mulBin | mul1 + Mi = mulBin | mul2 + Gi = mulBin | mul3 + Ti = mulBin | mul4 + Pi = mulBin | mul5 + Ei = mulBin | mul6 + Zi = mulBin | mul7 + Yi = mulBin | mul8 +) + +func (p *NumInfo) next() bool { + if p.p >= len(p.src) { + p.ch = 0 + return false + } + p.ch = p.src[p.p] + p.p++ + if p.ch == '.' 
{ + if len(p.buf) == 0 { + p.buf = append(p.buf, '0') + } + p.buf = append(p.buf, '.') + } + return true +} + +func (p *NumInfo) digitVal(ch byte) (d int) { + switch { + case '0' <= ch && ch <= '9': + d = int(ch - '0') + case ch == '_': + p.UseSep = true + return 0 + case 'a' <= ch && ch <= 'f': + d = int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + d = int(ch - 'A' + 10) + default: + return 16 // larger than any legal digit val + } + return d +} + +func (p *NumInfo) scanMantissa(base int) bool { + hasDigit := false + var last byte + for p.digitVal(p.ch) < base { + if p.ch != '_' { + p.buf = append(p.buf, p.ch) + hasDigit = true + } + last = p.ch + p.next() + } + if last == '_' { + p.err = p.errorf("illegal '_' in number") + } + return hasDigit +} + +func (p *NumInfo) scanNumber(seenDecimalPoint bool) error { + p.base = 10 + + if seenDecimalPoint { + p.isFloat = true + if !p.scanMantissa(10) { + return p.errorf("illegal fraction %q", p.src) + } + goto exponent + } + + if p.ch == '0' { + // int or float + p.next() + switch p.ch { + case 'x', 'X': + p.base = 16 + // hexadecimal int + p.next() + if !p.scanMantissa(16) { + // only scanned "0x" or "0X" + return p.errorf("illegal hexadecimal number %q", p.src) + } + case 'b': + p.base = 2 + // binary int + p.next() + if !p.scanMantissa(2) { + // only scanned "0b" + return p.errorf("illegal binary number %q", p.src) + } + case 'o': + p.base = 8 + // octal int + p.next() + if !p.scanMantissa(8) { + // only scanned "0o" + return p.errorf("illegal octal number %q", p.src) + } + default: + // int (base 8 or 10) or float + p.scanMantissa(8) + if p.ch == '8' || p.ch == '9' { + p.scanMantissa(10) + if p.ch != '.' 
&& p.ch != 'e' && p.ch != 'E' { + return p.errorf("illegal integer number %q", p.src) + } + } + switch p.ch { + case 'e', 'E': + if len(p.buf) == 0 { + p.buf = append(p.buf, '0') + } + fallthrough + case '.': + goto fraction + } + if len(p.buf) > 0 { + p.base = 8 + } + } + goto exit + } + + // decimal int or float + if !p.scanMantissa(10) { + return p.errorf("illegal number start %q", p.src) + } + +fraction: + if p.ch == '.' { + p.isFloat = true + p.next() + p.scanMantissa(10) + } + +exponent: + switch p.ch { + case 'K', 'M', 'G', 'T', 'P': + p.mul = charToMul[p.ch] + p.next() + if p.ch == 'i' { + p.mul |= mulBin + p.next() + } else { + p.mul |= mulDec + } + var v apd.Decimal + p.isFloat = false + return p.decimal(&v) + + case 'e', 'E': + p.isFloat = true + p.next() + p.buf = append(p.buf, 'e') + if p.ch == '-' || p.ch == '+' { + p.buf = append(p.buf, p.ch) + p.next() + } + if !p.scanMantissa(10) { + return p.errorf("illegal exponent %q", p.src) + } + } + +exit: + return nil +} + +var charToMul = map[byte]Multiplier{ + 'K': mul1, + 'M': mul2, + 'G': mul3, + 'T': mul4, + 'P': mul5, + 'E': mul6, + 'Z': mul7, + 'Y': mul8, +} + +var mulToRat = map[Multiplier]*apd.Decimal{} + +func init() { + d := apd.New(1, 0) + b := apd.New(1, 0) + dm := apd.New(1000, 0) + bm := apd.New(1024, 0) + + c := apd.BaseContext + for i := Multiplier(1); int(i) < len(charToMul); i++ { + // TODO: may we write to one of the sources? + var bn, dn apd.Decimal + _, _ = c.Mul(&dn, d, dm) + d = &dn + _, _ = c.Mul(&bn, b, bm) + b = &bn + mulToRat[mulDec|i] = d + mulToRat[mulBin|i] = b + } +} diff --git a/vendor/cuelang.org/go/cue/literal/quote.go b/vendor/cuelang.org/go/cue/literal/quote.go new file mode 100644 index 0000000000..9cbe6e82f9 --- /dev/null +++ b/vendor/cuelang.org/go/cue/literal/quote.go @@ -0,0 +1,370 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package literal + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Form defines how to quote a string or bytes literal. +type Form struct { + hashCount int + quote byte + multiline bool + auto bool + exact bool + asciiOnly bool + graphicOnly bool + indent string + tripleQuote string +} + +// TODO: +// - Fixed or max level of escape modifiers (#""#). +// - Option to fall back to bytes if value cannot be represented as string. +// E.g. ExactString. +// - QuoteExact that fails with an error if a string cannot be represented +// without loss. +// - Handle auto-breaking for long lines (Swift-style, \-terminated lines). +// This is not supported yet in CUE, but may, and should be considered as +// a possibility in API design. +// - Other possible convenience forms: Blob (auto-break bytes), String (bytes +// or string), Label. + +// WithTabIndent returns a new Form with indentation set to the given number +// of tabs. The result will be a multiline string. +func (f Form) WithTabIndent(n int) Form { + f.indent = tabs(n) + f.multiline = true + return f +} + +const tabIndent = "\t\t\t\t\t\t\t\t\t\t\t\t" + +func tabs(n int) string { + if n < len(tabIndent) { + return tabIndent[:n] + } + return strings.Repeat("\t", n) +} + +// WithOptionalIndent is like WithTabIndent, but only returns a multiline +// strings if it doesn't contain any newline characters. 
+func (f Form) WithOptionalTabIndent(tabs int) Form { + if tabs < len(tabIndent) { + f.indent = tabIndent[:tabs] + } else { + f.indent = strings.Repeat("\t", tabs) + } + f.auto = true + return f +} + +// WithASCIIOnly ensures the quoted strings consists solely of valid ASCII +// characters. +func (f Form) WithASCIIOnly() Form { + f.asciiOnly = true + return f +} + +// WithGraphicOnly ensures the quoted strings consists solely of printable +// characters. +func (f Form) WithGraphicOnly() Form { + f.graphicOnly = true + return f +} + +var ( + // String defines the format of a CUE string. Conversions may be lossy. + String Form = stringForm + + // TODO: ExactString: quotes to bytes type if the string cannot be + // represented without loss of accuracy. + + // Label is like Text, but optimized for labels. + Label Form = stringForm + + // Bytes defines the format of bytes literal. + Bytes Form = bytesForm + + stringForm = Form{ + quote: '"', + tripleQuote: `"""`, + } + bytesForm = Form{ + quote: '\'', + tripleQuote: `'''`, + exact: true, + } +) + +// Quote returns CUE string literal representing s. The returned string uses CUE +// escape sequences (\t, \n, \u00FF, \u0100) for control characters and +// non-printable characters as defined by strconv.IsPrint. +// +// It reports an error if the string cannot be converted to the desired form. +func (f Form) Quote(s string) string { + return string(f.Append(make([]byte, 0, 3*len(s)/2), s)) +} + +const ( + lowerhex = "0123456789abcdef" +) + +// Append appends a CUE string literal representing s, as generated by Quote, to +// buf and returns the extended buffer. +func (f Form) Append(buf []byte, s string) []byte { + if f.auto && strings.ContainsRune(s, '\n') { + f.multiline = true + } + if f.multiline { + f.hashCount = f.requiredHashCount(s) + } + + // Often called with big strings, so preallocate. If there's quoting, + // this is conservative but still helps a lot. 
+ if cap(buf)-len(buf) < len(s) { + nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1) + copy(nBuf, buf) + buf = nBuf + } + for i := 0; i < f.hashCount; i++ { + buf = append(buf, '#') + } + if f.multiline { + buf = append(buf, f.quote, f.quote, f.quote, '\n') + if s == "" { + buf = append(buf, f.indent...) + buf = append(buf, f.quote, f.quote, f.quote) + return buf + } + if len(s) > 0 && s[0] != '\n' { + buf = append(buf, f.indent...) + } + } else { + buf = append(buf, f.quote) + } + + buf = f.appendEscaped(buf, s) + + if f.multiline { + buf = append(buf, '\n') + buf = append(buf, f.indent...) + buf = append(buf, f.quote, f.quote, f.quote) + } else { + buf = append(buf, f.quote) + } + for i := 0; i < f.hashCount; i++ { + buf = append(buf, '#') + } + + return buf +} + +// AppendEscaped appends a CUE string literal representing s, as generated by +// Quote but without the quotes, to buf and returns the extended buffer. +// +// It does not include the last indentation. +func (f Form) AppendEscaped(buf []byte, s string) []byte { + if f.auto && strings.ContainsRune(s, '\n') { + f.multiline = true + } + + // Often called with big strings, so preallocate. If there's quoting, + // this is conservative but still helps a lot. + if cap(buf)-len(buf) < len(s) { + nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1) + copy(nBuf, buf) + buf = nBuf + } + + buf = f.appendEscaped(buf, s) + + return buf +} + +func (f Form) appendEscaped(buf []byte, s string) []byte { + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if f.exact && width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + if f.multiline && r == '\n' { + buf = append(buf, '\n') + if len(s) > 1 && s[1] != '\n' { + buf = append(buf, f.indent...) 
+ } + continue + } + buf = f.appendEscapedRune(buf, r) + } + return buf +} + +func (f *Form) appendEscapedRune(buf []byte, r rune) []byte { + var runeTmp [utf8.UTFMax]byte + if (!f.multiline && r == rune(f.quote)) || r == '\\' { // always backslashed + buf = f.appendEscape(buf) + buf = append(buf, byte(r)) + return buf + } + if f.asciiOnly { + if r < utf8.RuneSelf && strconv.IsPrint(r) { + buf = append(buf, byte(r)) + return buf + } + } else if strconv.IsPrint(r) || f.graphicOnly && isInGraphicList(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + return buf + } + buf = f.appendEscape(buf) + switch r { + case '\a': + buf = append(buf, 'a') + case '\b': + buf = append(buf, 'b') + case '\f': + buf = append(buf, 'f') + case '\n': + buf = append(buf, 'n') + case '\r': + buf = append(buf, 'r') + case '\t': + buf = append(buf, 't') + case '\v': + buf = append(buf, 'v') + default: + switch { + case r < ' ' && f.exact: + buf = append(buf, 'x') + buf = append(buf, lowerhex[byte(r)>>4]) + buf = append(buf, lowerhex[byte(r)&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, 'u') + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, 'U') + for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + return buf +} + +func (f *Form) appendEscape(buf []byte) []byte { + buf = append(buf, '\\') + for i := 0; i < f.hashCount; i++ { + buf = append(buf, '#') + } + return buf +} + +// requiredHashCount returns the number of # characters +// that are required to quote the multiline string s. +func (f *Form) requiredHashCount(s string) int { + hashCount := 0 + i := 0 + // Find all occurrences of the triple-quote and count + // the maximum number of succeeding # characters. 
+ for { + j := strings.Index(s[i:], f.tripleQuote) + if j == -1 { + break + } + i += j + 3 + // Absorb all extra quotes, so we + // get to the end of the sequence. + for ; i < len(s); i++ { + if s[i] != f.quote { + break + } + } + e := i - 1 + // Count succeeding # characters. + for ; i < len(s); i++ { + if s[i] != '#' { + break + } + } + if nhash := i - e; nhash > hashCount { + hashCount = nhash + } + } + return hashCount +} + +// isInGraphicList reports whether the rune is in the isGraphic list. This separation +// from IsGraphic allows quoteWith to avoid two calls to IsPrint. +// Should be called only if IsPrint fails. +func isInGraphicList(r rune) bool { + // We know r must fit in 16 bits - see makeisprint.go. + if r > 0xFFFF { + return false + } + rr := uint16(r) + i := bsearch16(isGraphic, rr) + return i < len(isGraphic) && rr == isGraphic[i] +} + +// bsearch16 returns the smallest i such that a[i] >= x. +// If there is no such i, bsearch16 returns len(a). +func bsearch16(a []uint16, x uint16) int { + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 + if a[h] < x { + i = h + 1 + } else { + j = h + } + } + return i +} + +// isGraphic lists the graphic runes not matched by IsPrint. +var isGraphic = []uint16{ + 0x00a0, + 0x1680, + 0x2000, + 0x2001, + 0x2002, + 0x2003, + 0x2004, + 0x2005, + 0x2006, + 0x2007, + 0x2008, + 0x2009, + 0x200a, + 0x202f, + 0x205f, + 0x3000, +} diff --git a/vendor/cuelang.org/go/cue/literal/string.go b/vendor/cuelang.org/go/cue/literal/string.go new file mode 100644 index 0000000000..59fae0a60e --- /dev/null +++ b/vendor/cuelang.org/go/cue/literal/string.go @@ -0,0 +1,421 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package literal + +import ( + "errors" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + errSyntax = errors.New("invalid syntax") + errInvalidWhitespace = errors.New("invalid string: invalid whitespace") + errMissingNewline = errors.New( + "invalid string: opening quote of multiline string must be followed by newline") + errUnmatchedQuote = errors.New("invalid string: unmatched quote") + // TODO: making this an error is optional according to RFC 4627. But we + // could make it not an error if this ever results in an issue. + errSurrogate = errors.New("unmatched surrogate pair") +) + +// Unquote interprets s as a single- or double-quoted, single- or multi-line +// string, possibly with custom escape delimiters, returning the string value +// that s quotes. +func Unquote(s string) (string, error) { + info, nStart, _, err := ParseQuotes(s, s) + if err != nil { + return "", err + } + s = s[nStart:] + return info.Unquote(s) +} + +// QuoteInfo describes the type of quotes used for a string. +type QuoteInfo struct { + quote string + whitespace string + numHash int + multiline bool + char byte + numChar byte +} + +// IsDouble reports whether the literal uses double quotes. +func (q QuoteInfo) IsDouble() bool { + return q.char == '"' +} + +// IsMulti reports whether a multi-line string was parsed. +func (q QuoteInfo) IsMulti() bool { + return q.multiline +} + +// Whitespace returns prefix whitespace for multiline strings. 
+func (q QuoteInfo) Whitespace() string { + return q.whitespace +} + +// ParseQuotes checks if the opening quotes in start matches the ending quotes +// in end and reports its type as q or an error if they do not matching or are +// invalid. nStart indicates the number of bytes used for the opening quote. +func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) { + for i, c := range start { + if c != '#' { + break + } + q.numHash = i + 1 + } + s := start[q.numHash:] + switch s[0] { + case '"', '\'': + q.char = s[0] + if len(s) > 3 && s[1] == s[0] && s[2] == s[0] { + switch s[3] { + case '\n': + q.quote = start[:3+q.numHash] + case '\r': + if len(s) > 4 && s[4] == '\n' { + q.quote = start[:4+q.numHash] + break + } + fallthrough + default: + return q, 0, 0, errMissingNewline + } + q.multiline = true + q.numChar = 3 + nStart = len(q.quote) + 1 // add whitespace later + } else { + q.quote = start[:1+q.numHash] + q.numChar = 1 + nStart = len(q.quote) + } + default: + return q, 0, 0, errSyntax + } + quote := start[:int(q.numChar)+q.numHash] + for i := 0; i < len(quote); i++ { + if j := len(end) - i - 1; j < 0 || quote[i] != end[j] { + return q, 0, 0, errUnmatchedQuote + } + } + if q.multiline { + i := len(end) - len(quote) + for i > 0 { + r, size := utf8.DecodeLastRuneInString(end[:i]) + if r == '\n' || !unicode.IsSpace(r) { + break + } + i -= size + } + q.whitespace = end[i : len(end)-len(quote)] + + if len(start) > nStart && start[nStart] != '\n' { + if !strings.HasPrefix(start[nStart:], q.whitespace) { + return q, 0, 0, errInvalidWhitespace + } + nStart += len(q.whitespace) + } + } + + return q, nStart, int(q.numChar) + q.numHash, nil +} + +// Unquote unquotes the given string. It must be terminated with a quote or an +// interpolation start. Escape sequences are expanded and surrogates +// are replaced with the corresponding non-surrogate code points. 
+func (q QuoteInfo) Unquote(s string) (string, error) { + if len(s) > 0 && !q.multiline { + if contains(s, '\n') || contains(s, '\r') { + return "", errSyntax + } + + // Is it trivial? Avoid allocation. + if s[len(s)-1] == q.char && q.numHash == 0 { + if s := s[:len(s)-1]; isSimple(s, rune(q.char)) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + stripNL := false + for len(s) > 0 { + switch s[0] { + case '\r': + s = s[1:] + continue + case '\n': + switch { + case !q.multiline: + fallthrough + default: + return "", errInvalidWhitespace + case strings.HasPrefix(s[1:], q.whitespace): + s = s[1+len(q.whitespace):] + case strings.HasPrefix(s[1:], "\n"): + s = s[1:] + } + stripNL = true + buf = append(buf, '\n') + continue + } + c, multibyte, ss, err := unquoteChar(s, q) + if surHigh <= c && c < surEnd { + if c >= surLow { + return "", errSurrogate + } + var cl rune + cl, _, ss, err = unquoteChar(ss, q) + if cl < surLow || surEnd <= cl { + return "", errSurrogate + } + c = 0x10000 + (c-surHigh)*0x400 + (cl - surLow) + } + + if err != nil { + return "", err + } + + s = ss + if c < 0 { + if c == -2 { + stripNL = false + } + if stripNL { + // Strip the last newline, but only if it came from a closing + // quote. + buf = buf[:len(buf)-1] + } + return string(buf), nil + } + stripNL = false + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + // allow unmatched quotes if already checked. + return "", errUnmatchedQuote +} + +const ( + surHigh = 0xD800 + surLow = 0xDC00 + surEnd = 0xE000 +) + +func isSimple(s string, quote rune) bool { + // TODO(perf): check if using a simple DFA to detect surrogate pairs is + // faster than converting to code points. At the very least there should + // be an ASCII fast path. 
+ for _, r := range s { + if r == quote || r == '\\' { + return false + } + if surHigh <= r && r < surEnd { + return false + } + } + return true +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +// unquoteChar decodes the first character or byte in the escaped string. +// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; the special value +// of -1 indicates terminated by quotes and -2 means terminated by \(. +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, kind, specifies the type of literal being parsed +// and therefore which kind of escape sequences are permitted. +// For kind 's' only JSON escapes and \u{ are permitted. +// For kind 'b' also hexadecimal and octal escape sequences are permitted. +// +// The third argument, quote, specifies that an ASCII quoting character that +// is not permitted in the output. +func unquoteChar(s string, info QuoteInfo) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == info.char && info.char != 0: + for i := 1; byte(i) < info.numChar; i++ { + if i >= len(s) || s[i] != info.char { + return rune(info.char), false, s[1:], nil + } + } + for i := 0; i < info.numHash; i++ { + if i+int(info.numChar) >= len(s) || s[i+int(info.numChar)] != '#' { + return rune(info.char), false, s[1:], nil + } + } + if ln := int(info.numChar) + info.numHash; len(s) != ln { + // TODO: terminating quote in middle of string + return 0, false, s[ln:], errSyntax + } + return -1, false, "", nil + case c >= utf8.RuneSelf: + // TODO: consider handling surrogate values. 
These are discarded by + // DecodeRuneInString. It is technically correct to disallow it, but + // some JSON parsers allow this anyway. + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + if len(s) <= 1+info.numHash { + return '\\', false, s[1:], nil + } + for i := 1; i <= info.numHash && i < len(s); i++ { + if s[i] != '#' { + return '\\', false, s[1:], nil + } + } + + c := s[1+info.numHash] + s = s[2+info.numHash:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case '/': + value = '/' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + if info.char == '"' { + err = errSyntax + return + } + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + if info.char == '"' { + err = errSyntax + return + } + v := rune(c) - '0' + if len(s) < 2 { + err = errSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + // TODO: should we allow escaping of quotes regardless? 
+ if c != info.char { + err = errSyntax + return + } + value = rune(c) + case '(': + if s != "" { + // TODO: terminating quote in middle of string + return 0, false, s, errSyntax + } + value = -2 + default: + err = errSyntax + return + } + tail = s + return +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go new file mode 100644 index 0000000000..43ca6d4639 --- /dev/null +++ b/vendor/cuelang.org/go/cue/marshal.go @@ -0,0 +1,221 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + "compress/gzip" + "encoding/gob" + "path/filepath" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/format" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" + "cuelang.org/go/internal/core/export" +) + +// root. 
+type instanceData struct { + Root bool + Path string + Files []fileData +} + +type fileData struct { + Name string + Data []byte +} + +const version = 1 + +type unmarshaller struct { + ctxt *build.Context + imports map[string]*instanceData +} + +func (b *unmarshaller) load(pos token.Pos, path string) *build.Instance { + bi := b.imports[path] + if bi == nil { + return nil + } + return b.build(bi) +} + +func (b *unmarshaller) build(bi *instanceData) *build.Instance { + p := b.ctxt.NewInstance(bi.Path, b.load) + p.ImportPath = bi.Path + for _, f := range bi.Files { + _ = p.AddFile(f.Name, f.Data) + } + p.Complete() + return p +} + +func compileInstances(r *Runtime, data []*instanceData) (instances []*Instance, err error) { + b := unmarshaller{ + ctxt: build.NewContext(), + imports: map[string]*instanceData{}, + } + for _, i := range data { + if i.Path == "" { + if !i.Root { + return nil, errors.Newf(token.NoPos, + "data contains non-root package without import path") + } + continue + } + b.imports[i.Path] = i + } + + builds := []*build.Instance{} + for _, i := range data { + if !i.Root { + continue + } + builds = append(builds, b.build(i)) + } + + return r.build(builds) +} + +// Unmarshal creates an Instance from bytes generated by the MarshalBinary +// method of an instance. 
+func (r *Runtime) Unmarshal(b []byte) ([]*Instance, error) { + if len(b) == 0 { + return nil, errors.Newf(token.NoPos, "unmarshal failed: empty buffer") + } + + switch b[0] { + case version: + default: + return nil, errors.Newf(token.NoPos, + "unmarshal failed: unsupported version %d, regenerate data", b[0]) + } + + reader, err := gzip.NewReader(bytes.NewReader(b[1:])) + if err != nil { + return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err) + } + + data := []*instanceData{} + err = gob.NewDecoder(reader).Decode(&data) + if err != nil { + return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err) + } + + return compileInstances(r, data) +} + +// Marshal creates bytes from a group of instances. Imported instances will +// be included in the emission. +// +// The stored instances are functionally the same, but preserving of file +// information is only done on a best-effort basis. +func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { + staged := []instanceData{} + done := map[string]int{} + + var errs errors.Error + + var stageInstance func(i *Instance) (pos int) + stageInstance = func(i *Instance) (pos int) { + if p, ok := done[i.ImportPath]; ok { + return p + } + // TODO: support exporting instance + file, _ := export.Def(r.runtime(), i.inst.ID(), i.root) + imports := []string{} + file.VisitImports(func(i *ast.ImportDecl) { + for _, spec := range i.Specs { + info, _ := astutil.ParseImportSpec(spec) + imports = append(imports, info.ID) + } + }) + + if i.PkgName != "" { + p, name, _ := internal.PackageInfo(file) + if p == nil { + pkg := &ast.Package{Name: ast.NewIdent(i.PkgName)} + file.Decls = append([]ast.Decl{pkg}, file.Decls...) + } else if name != i.PkgName { + // p is guaranteed to be generated by Def, so it is "safe" to + // modify. 
+ p.Name = ast.NewIdent(i.PkgName) + } + } + + b, err := format.Node(file) + errs = errors.Append(errs, errors.Promote(err, "marshal")) + + filename := "unmarshal" + if i.inst != nil && len(i.inst.Files) == 1 { + filename = i.inst.Files[0].Filename + + dir := i.Dir + if i.inst != nil && i.inst.Root != "" { + dir = i.inst.Root + } + if dir != "" { + filename = filepath.FromSlash(filename) + filename, _ = filepath.Rel(dir, filename) + filename = filepath.ToSlash(filename) + } + } + // TODO: this should probably be changed upstream, but as the path + // is for reference purposes only, this is safe. + importPath := filepath.ToSlash(i.ImportPath) + + staged = append(staged, instanceData{ + Path: importPath, + Files: []fileData{{filename, b}}, + }) + + p := len(staged) - 1 + + for _, imp := range imports { + i := getImportFromPath(r.runtime(), imp) + if i == nil || !strings.Contains(imp, ".") { + continue // a builtin package. + } + stageInstance(i) + } + + return p + } + + for _, i := range instances { + staged[stageInstance(i)].Root = true + } + + buf := &bytes.Buffer{} + buf.WriteByte(version) + + zw := gzip.NewWriter(buf) + if err := gob.NewEncoder(zw).Encode(staged); err != nil { + return nil, err + } + + if err := zw.Close(); err != nil { + return nil, err + } + + return buf.Bytes(), nil + +} diff --git a/vendor/cuelang.org/go/cue/op.go b/vendor/cuelang.org/go/cue/op.go new file mode 100644 index 0000000000..22b31a5d12 --- /dev/null +++ b/vendor/cuelang.org/go/cue/op.go @@ -0,0 +1,182 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/core/adt" +) + +// Op indicates the operation at the top of an expression tree of the expression +// use to evaluate a value. +type Op = adt.Op + +// Values of Op. +const ( + NoOp Op = adt.NoOp + + AndOp Op = adt.AndOp + OrOp Op = adt.OrOp + + SelectorOp Op = adt.SelectorOp + IndexOp Op = adt.IndexOp + SliceOp Op = adt.SliceOp + CallOp Op = adt.CallOp + + BooleanAndOp Op = adt.BoolAndOp + BooleanOrOp Op = adt.BoolOrOp + + EqualOp Op = adt.EqualOp + NotOp Op = adt.NotOp + NotEqualOp Op = adt.NotEqualOp + LessThanOp Op = adt.LessThanOp + LessThanEqualOp Op = adt.LessEqualOp + GreaterThanOp Op = adt.GreaterThanOp + GreaterThanEqualOp Op = adt.GreaterEqualOp + + RegexMatchOp Op = adt.MatchOp + NotRegexMatchOp Op = adt.NotMatchOp + + AddOp Op = adt.AddOp + SubtractOp Op = adt.SubtractOp + MultiplyOp Op = adt.MultiplyOp + FloatQuotientOp Op = adt.FloatQuotientOp + IntQuotientOp Op = adt.IntQuotientOp + IntRemainderOp Op = adt.IntRemainderOp + IntDivideOp Op = adt.IntDivideOp + IntModuloOp Op = adt.IntModuloOp + + InterpolationOp Op = adt.InterpolationOp +) + +// isCmp reports whether an op is a comparator. 
+func (op op) isCmp() bool { + return opEql <= op && op <= opGeq +} + +func (op op) unifyType() (unchecked, ok bool) { + if op == opUnifyUnchecked { + return true, true + } + return false, op == opUnify +} + +type op uint16 + +const ( + opUnknown op = iota + + opUnify + opUnifyUnchecked + opDisjunction + + opLand + opLor + opNot + + opEql + opNeq + opMat + opNMat + + opLss + opGtr + opLeq + opGeq + + opAdd + opSub + opMul + opQuo + opRem + + opIDiv + opIMod + opIQuo + opIRem +) + +var opStrings = []string{ + opUnknown: "??", + + opUnify: "&", + // opUnifyUnchecked is internal only. Syntactically this is + // represented as embedding. + opUnifyUnchecked: "&!", + opDisjunction: "|", + + opLand: "&&", + opLor: "||", + opNot: "!", + + opEql: "==", + opNeq: "!=", + opMat: "=~", + opNMat: "!~", + + opLss: "<", + opGtr: ">", + opLeq: "<=", + opGeq: ">=", + + opAdd: "+", + opSub: "-", + opMul: "*", + opQuo: "/", + + opIDiv: "div", + opIMod: "mod", + opIQuo: "quo", + opIRem: "rem", +} + +func (op op) String() string { return opStrings[op] } + +var tokenMap = map[token.Token]op{ + token.OR: opDisjunction, // | + token.AND: opUnify, // & + + token.ADD: opAdd, // + + token.SUB: opSub, // - + token.MUL: opMul, // * + token.QUO: opQuo, // / + + token.IDIV: opIDiv, // div + token.IMOD: opIMod, // mod + token.IQUO: opIQuo, // quo + token.IREM: opIRem, // rem + + token.LAND: opLand, // && + token.LOR: opLor, // || + + token.EQL: opEql, // == + token.LSS: opLss, // < + token.GTR: opGtr, // > + token.NOT: opNot, // ! 
+ + token.NEQ: opNeq, // != + token.LEQ: opLeq, // <= + token.GEQ: opGeq, // >= + token.MAT: opMat, // =~ + token.NMAT: opNMat, // !~ +} + +var opMap = map[op]token.Token{} + +func init() { + for t, o := range tokenMap { + opMap[o] = t + } +} diff --git a/vendor/cuelang.org/go/cue/parser/doc.go b/vendor/cuelang.org/go/cue/parser/doc.go new file mode 100644 index 0000000000..adde13989b --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/doc.go @@ -0,0 +1,23 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parser implements a parser for CUE source files. Input may be +// provided in a variety of forms (see the various Parse* functions); the output +// is an abstract syntax tree (AST) representing the CUE source. The parser is +// invoked through one of the Parse* functions. +// +// The parser accepts a larger language than is syntactically permitted by the +// CUE spec, for simplicity, and for improved robustness in the presence of +// syntax errors. +package parser // import "cuelang.org/go/cue/parser" diff --git a/vendor/cuelang.org/go/cue/parser/fuzz.go b/vendor/cuelang.org/go/cue/parser/fuzz.go new file mode 100644 index 0000000000..21a1d087d6 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/fuzz.go @@ -0,0 +1,26 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build gofuzz +// +build gofuzz + +package parser + +func Fuzz(b []byte) int { + _, err := ParseFile("go-fuzz", b) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go new file mode 100644 index 0000000000..8695a6c34a --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/interface.go @@ -0,0 +1,232 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains the exported entry points for invoking the + +package parser + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/source" +) + +// Option specifies a parse option. +type Option func(p *parser) + +var ( + // PackageClauseOnly causes parsing to stop after the package clause. 
+ PackageClauseOnly Option = packageClauseOnly + packageClauseOnly = func(p *parser) { + p.mode |= packageClauseOnlyMode + } + + // ImportsOnly causes parsing to stop parsing after the import declarations. + ImportsOnly Option = importsOnly + importsOnly = func(p *parser) { + p.mode |= importsOnlyMode + } + + // ParseComments causes comments to be parsed. + ParseComments Option = parseComments + parseComments = func(p *parser) { + p.mode |= parseCommentsMode + } + + // Trace causes parsing to print a trace of parsed productions. + Trace Option = traceOpt + traceOpt = func(p *parser) { + p.mode |= traceMode + } + + // DeclarationErrors causes parsing to report declaration errors. + DeclarationErrors Option = declarationErrors + declarationErrors = func(p *parser) { + p.mode |= declarationErrorsMode + } + + // AllErrors causes all errors to be reported (not just the first 10 on different lines). + AllErrors Option = allErrors + allErrors = func(p *parser) { + p.mode |= allErrorsMode + } + + // AllowPartial allows the parser to be used on a prefix buffer. + AllowPartial Option = allowPartial + allowPartial = func(p *parser) { + p.mode |= partialMode + } +) + +// FromVersion specifies until which legacy version the parser should provide +// backwards compatibility. +func FromVersion(version int) Option { + if version >= 0 { + version++ + } + // Versions: + // <0: major version 0 (counting -1000 + x, where x = 100*m+p in 0.m.p + // >=0: x+1 in 1.x.y + return func(p *parser) { p.version = version } +} + +func version0(minor, patch int) int { + return -1000 + 100*minor + patch +} + +// DeprecationError is a sentinel error to indicate that an error is +// related to an unsupported old CUE syntax. 
+type DeprecationError struct { + Version int +} + +func (e *DeprecationError) Error() string { + return "try running `cue fix` (possibly with an earlier version, like v0.2.2) to upgrade" +} + +// Latest specifies the latest version of the parser, effectively setting +// the strictest implementation. +const Latest = latest + +const latest = -600 + +// FileOffset specifies the File position info to use. +func FileOffset(pos int) Option { + return func(p *parser) { p.offset = pos } +} + +// A mode value is a set of flags (or 0). +// They control the amount of source code parsed and other optional +// parser functionality. +type mode uint + +const ( + packageClauseOnlyMode mode = 1 << iota // stop parsing after package clause + importsOnlyMode // stop parsing after import declarations + parseCommentsMode // parse comments and add them to AST + partialMode + traceMode // print a trace of parsed productions + declarationErrorsMode // report declaration errors + allErrorsMode // report all errors (not just the first 10 on different lines) +) + +// ParseFile parses the source code of a single CUE source file and returns +// the corresponding File node. The source code may be provided via +// the filename of the source file, or via the src parameter. +// +// If src != nil, ParseFile parses the source from src and the filename is +// only used when recording position information. The type of the argument +// for the src parameter must be string, []byte, or io.Reader. +// If src == nil, ParseFile parses the file specified by filename. +// +// The mode parameter controls the amount of source text parsed and other +// optional parser functionality. Position information is recorded in the +// file set fset, which must not be nil. +// +// If the source couldn't be read, the returned AST is nil and the error +// indicates the specific failure. 
If the source was read but syntax +// errors were found, the result is a partial AST (with Bad* nodes +// representing the fragments of erroneous source code). Multiple errors +// are returned via a ErrorList which is sorted by file position. +func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, err error) { + + // get source + text, err := source.Read(filename, src) + if err != nil { + return nil, err + } + + var pp parser + defer func() { + if pp.panicking { + _ = recover() + } + + // set result values + if f == nil { + // source is not a valid Go source file - satisfy + // ParseFile API and return a valid (but) empty + // *File + f = &ast.File{ + // Scope: NewScope(nil), + } + } + + err = errors.Sanitize(pp.errors) + }() + + // parse source + pp.init(filename, text, mode) + f = pp.parseFile() + if f == nil { + return nil, pp.errors + } + f.Filename = filename + astutil.Resolve(f, pp.errf) + + return f, pp.errors +} + +// ParseExpr is a convenience function for parsing an expression. +// The arguments have the same meaning as for Parse, but the source must +// be a valid CUE (type or value) expression. Specifically, fset must not +// be nil. +func ParseExpr(filename string, src interface{}, mode ...Option) (ast.Expr, error) { + // get source + text, err := source.Read(filename, src) + if err != nil { + return nil, err + } + + var p parser + defer func() { + if p.panicking { + _ = recover() + } + err = errors.Sanitize(p.errors) + }() + + // parse expr + p.init(filename, text, mode) + // Set up pkg-level scopes to avoid nil-pointer errors. + // This is not needed for a correct expression x as the + // parser will be ok with a nil topScope, but be cautious + // in case of an erroneous x. + e := p.parseRHS() + + // If a comma was inserted, consume it; + // report an error if there's more tokens. 
+ if p.tok == token.COMMA && p.lit == "\n" { + p.next() + } + if p.mode&partialMode == 0 { + p.expect(token.EOF) + } + + if p.errors != nil { + return nil, p.errors + } + astutil.ResolveExpr(e, p.errf) + + return e, p.errors +} + +// parseExprString is a convenience function for obtaining the AST of an +// expression x. The position information recorded in the AST is undefined. The +// filename used in error messages is the empty string. +func parseExprString(x string) (ast.Expr, error) { + return ParseExpr("", []byte(x)) +} diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go new file mode 100644 index 0000000000..e91b3014e8 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/parser.go @@ -0,0 +1,1669 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strings" + "unicode" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/scanner" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/astinternal" +) + +var debugStr = astinternal.DebugStr + +// The parser structure holds the parser's internal state. +type parser struct { + file *token.File + offset int + errors errors.Error + scanner scanner.Scanner + + // Tracing/debugging + mode mode // parsing mode + trace bool // == (mode & Trace != 0) + panicking bool // set if we are bailing out due to too many errors. 
+ indent int // indentation used for tracing output + + // Comments + leadComment *ast.CommentGroup + comments *commentState + + // Next token + pos token.Pos // token position + tok token.Token // one token look-ahead + lit string // token literal + + // Error recovery + // (used to limit the number of calls to syncXXX functions + // w/o making scanning progress - avoids potential endless + // loops across multiple parser functions during error recovery) + syncPos token.Pos // last synchronization position + syncCnt int // number of calls to syncXXX without progress + + // Non-syntactic parser control + exprLev int // < 0: in control clause, >= 0: in expression + + imports []*ast.ImportSpec // list of imports + + version int +} + +func (p *parser) init(filename string, src []byte, mode []Option) { + p.offset = -1 + for _, f := range mode { + f(p) + } + p.file = token.NewFile(filename, p.offset, len(src)) + + var m scanner.Mode + if p.mode&parseCommentsMode != 0 { + m = scanner.ScanComments + } + eh := func(pos token.Pos, msg string, args []interface{}) { + p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...)) + } + p.scanner.Init(p.file, src, eh, m) + + p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently) + + p.comments = &commentState{pos: -1} + + p.next() +} + +type commentState struct { + parent *commentState + pos int8 + groups []*ast.CommentGroup + + // lists are not attached to nodes themselves. Enclosed expressions may + // miss a comment due to commas and line termination. closeLists ensures + // that comments will be passed to someone. 
+ isList int + lastChild ast.Node + lastPos int8 +} + +// openComments reserves the next doc comment for the caller and flushes +func (p *parser) openComments() *commentState { + child := &commentState{ + parent: p.comments, + } + if c := p.comments; c != nil && c.isList > 0 { + if c.lastChild != nil { + var groups []*ast.CommentGroup + for _, cg := range c.groups { + if cg.Position == 0 { + groups = append(groups, cg) + } + } + groups = append(groups, c.lastChild.Comments()...) + for _, cg := range c.groups { + if cg.Position != 0 { + cg.Position = c.lastPos + groups = append(groups, cg) + } + } + ast.SetComments(c.lastChild, groups) + c.groups = nil + } else { + c.lastChild = nil + // attach before next + for _, cg := range c.groups { + cg.Position = 0 + } + child.groups = c.groups + c.groups = nil + } + } + if p.leadComment != nil { + child.groups = append(child.groups, p.leadComment) + p.leadComment = nil + } + p.comments = child + return child +} + +// openList is used to treat a list of comments as a single comment +// position in a production. +func (p *parser) openList() { + if p.comments.isList > 0 { + p.comments.isList++ + return + } + c := &commentState{ + parent: p.comments, + isList: 1, + } + p.comments = c +} + +func (c *commentState) add(g *ast.CommentGroup) { + g.Position = c.pos + c.groups = append(c.groups, g) +} + +func (p *parser) closeList() { + c := p.comments + if c.lastChild != nil { + for _, cg := range c.groups { + cg.Position = c.lastPos + c.lastChild.AddComment(cg) + } + c.groups = nil + } + switch c.isList--; { + case c.isList < 0: + if !p.panicking { + err := errors.Newf(p.pos, "unmatched close list") + p.errors = errors.Append(p.errors, err) + p.panicking = true + panic(err) + } + case c.isList == 0: + parent := c.parent + if len(c.groups) > 0 { + parent.groups = append(parent.groups, c.groups...) 
+ } + parent.pos++ + p.comments = parent + } +} + +func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node { + if p.comments != c { + if !p.panicking { + err := errors.Newf(p.pos, "unmatched comments") + p.errors = errors.Append(p.errors, err) + p.panicking = true + panic(err) + } + return n + } + p.comments = c.parent + if c.parent != nil { + c.parent.lastChild = n + c.parent.lastPos = c.pos + c.parent.pos++ + } + for _, cg := range c.groups { + if n != nil { + if cg != nil { + n.AddComment(cg) + } + } + } + c.groups = nil + return n +} + +func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr { + c.closeNode(p, n) + return n +} + +func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause { + c.closeNode(p, n) + return n +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *parser) printTrace(a ...interface{}) { + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + pos := p.file.Position(p.pos) + fmt.Printf("%5d:%3d: ", pos.Line, pos.Column) + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *parser, msg string) *parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *parser) { + p.indent-- + p.printTrace(")") +} + +// Advance to the next +func (p *parser) next0() { + // Because of one-token look-ahead, print the previous token + // when tracing as it provides a more readable output. The + // very first token (!p.pos.IsValid()) is not initialized + // (it is ILLEGAL), so don't print it . 
+ if p.trace && p.pos.IsValid() { + s := p.tok.String() + switch { + case p.tok.IsLiteral(): + p.printTrace(s, p.lit) + case p.tok.IsOperator(), p.tok.IsKeyword(): + p.printTrace("\"" + s + "\"") + default: + p.printTrace(s) + } + } + + p.pos, p.tok, p.lit = p.scanner.Scan() +} + +// Consume a comment and return it and the line on which it ends. +func (p *parser) consumeComment() (comment *ast.Comment, endline int) { + // /*-style comments may end on a different line than where they start. + // Scan the comment for '\n' chars and adjust endline accordingly. + endline = p.file.Line(p.pos) + if p.lit[1] == '*' { + p.assertV0(p.pos, 0, 10, "block quotes") + + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.lit); i++ { + if p.lit[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Slash: p.pos, Text: p.lit} + p.next0() + + return +} + +// Consume a group of adjacent comments, add it to the parser's +// comments list, and return it together with the line at which +// the last comment in the group ends. A non-comment token or n +// empty lines terminate a comment group. +func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + var rel token.RelPos + endline = p.file.Line(p.pos) + switch endline - prevLine { + case 0: + rel = token.Blank + case 1: + rel = token.Newline + default: + rel = token.NewSection + } + for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + cg := &ast.CommentGroup{List: list} + ast.SetRelPos(cg, rel) + comments = cg + return +} + +// Advance to the next non-comment In the process, collect +// any comment groups encountered, and refield the last lead and +// and line comments. 
+// +// A lead comment is a comment group that starts and ends in a +// line without any other tokens and that is followed by a non-comment +// token on the line immediately after the comment group. +// +// A line comment is a comment group that follows a non-comment +// token on the same line, and that has no tokens after it on the line +// where it ends. +// +// Lead and line comments may be considered documentation that is +// stored in the AST. +func (p *parser) next() { + // A leadComment may not be consumed if it leads an inner token of a node. + if p.leadComment != nil { + p.comments.add(p.leadComment) + } + p.leadComment = nil + prev := p.pos + p.next0() + p.comments.pos++ + + if p.tok == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + currentLine := p.file.Line(p.pos) + prevLine := p.file.Line(prev) + if prevLine == currentLine { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(prevLine, 0) + if p.file.Line(p.pos) != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + comment.Line = true + } + } + + // consume successor comments, if any + endline = -1 + for p.tok == token.COMMENT { + if comment != nil { + p.comments.add(comment) + } + comment, endline = p.consumeCommentGroup(prevLine, 1) + prevLine = currentLine + currentLine = p.file.Line(p.pos) + + } + + if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF { + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + comment.Doc = true + p.leadComment = comment + } else { + p.comments.add(comment) + } + } +} + +// assertV0 indicates the last version at which a certain feature was +// supported. 
+func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) { + v := version0(minor, patch) + if p.version != 0 && p.version > v { + p.errors = errors.Append(p.errors, + errors.Wrapf(&DeprecationError{v}, pos, + "use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1)) + } +} + +func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) { + // ePos := p.file.Position(pos) + ePos := pos + + // If AllErrors is not set, discard errors reported on the same line + // as the last recorded error and stop parsing if there are more than + // 10 errors. + if p.mode&allErrorsMode == 0 { + errors := errors.Errors(p.errors) + n := len(errors) + if n > 0 && errors[n-1].Position().Line() == ePos.Line() { + return // discard - likely a spurious error + } + if n > 10 { + p.panicking = true + panic("too many errors") + } + } + + p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...)) +} + +func (p *parser) errorExpected(pos token.Pos, obj string) { + if pos != p.pos { + p.errf(pos, "expected %s", obj) + return + } + // the error happened at the current position; + // make the error message more specific + if p.tok == token.COMMA && p.lit == "\n" { + p.errf(pos, "expected %s, found newline", obj) + return + } + + if p.tok.IsLiteral() { + p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit) + } else { + p.errf(pos, "expected %s, found '%s'", obj, p.tok) + } +} + +func (p *parser) expect(tok token.Token) token.Pos { + pos := p.pos + if p.tok != tok { + p.errorExpected(pos, "'"+tok.String()+"'") + } + p.next() // make progress + return pos +} + +// expectClosing is like expect but provides a better error message +// for the common case of a missing comma before a newline. 
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos { + if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" { + p.errf(p.pos, "missing ',' before newline in %s", context) + p.next() + } + return p.expect(tok) +} + +func (p *parser) expectComma() { + // semicolon is optional before a closing ')', ']', '}', or newline + if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF { + switch p.tok { + case token.COMMA: + p.next() + default: + p.errorExpected(p.pos, "','") + syncExpr(p) + } + } +} + +func (p *parser) atComma(context string, follow ...token.Token) bool { + if p.tok == token.COMMA { + return true + } + for _, t := range follow { + if p.tok == t { + return false + } + } + // TODO: find a way to detect crossing lines now we don't have a semi. + if p.lit == "\n" { + p.errf(p.pos, "missing ',' before newline") + } else { + p.errf(p.pos, "missing ',' in %s", context) + } + return true // "insert" comma and continue +} + +// syncExpr advances to the next field in a field list. +// Used for synchronization after an error. +func syncExpr(p *parser) { + for { + switch p.tok { + case token.COMMA: + // Return only if parser made some progress since last + // sync or if it has not reached 10 sync calls without + // progress. Otherwise consume at least one token to + // avoid an endless parser loop (it is possible that + // both parseOperand and parseStmt call syncStmt and + // correctly do not advance, thus the need for the + // invocation limit p.syncCnt). + if p.pos == p.syncPos && p.syncCnt < 10 { + p.syncCnt++ + return + } + if p.syncPos.Before(p.pos) { + p.syncPos = p.pos + p.syncCnt = 0 + return + } + // Reaching here indicates a parser bug, likely an + // incorrect token list in this function, but it only + // leads to skipping of possibly correct code if a + // previous error is present, and thus is preferred + // over a non-terminating parse. 
+ case token.EOF: + return + } + p.next() + } +} + +// safePos returns a valid file position for a given position: If pos +// is valid to begin with, safePos returns pos. If pos is out-of-range, +// safePos returns the EOF position. +// +// This is hack to work around "artificial" end positions in the AST which +// are computed by adding 1 to (presumably valid) token positions. If the +// token positions are invalid due to parse errors, the resulting end position +// may be past the file's EOF position, which would lead to panics if used +// later on. +func (p *parser) safePos(pos token.Pos) (res token.Pos) { + defer func() { + if recover() != nil { + res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position + } + }() + _ = p.file.Offset(pos) // trigger a panic if position is out-of-range + return pos +} + +// ---------------------------------------------------------------------------- +// Identifiers + +func (p *parser) parseIdent() *ast.Ident { + c := p.openComments() + pos := p.pos + name := "_" + if p.tok == token.IDENT { + name = p.lit + p.next() + } else { + p.expect(token.IDENT) // use expect() error handling + } + ident := &ast.Ident{NamePos: pos, Name: name} + c.closeNode(p, ident) + return ident +} + +func (p *parser) parseKeyIdent() *ast.Ident { + c := p.openComments() + pos := p.pos + name := p.lit + p.next() + ident := &ast.Ident{NamePos: pos, Name: name} + c.closeNode(p, ident) + return ident +} + +// ---------------------------------------------------------------------------- +// Expressions + +// parseOperand returns an expression. +// Callers must verify the result. 
+func (p *parser) parseOperand() (expr ast.Expr) { + if p.trace { + defer un(trace(p, "Operand")) + } + + switch p.tok { + case token.IDENT: + return p.parseIdent() + + case token.LBRACE: + return p.parseStruct() + + case token.LBRACK: + return p.parseList() + + case token.BOTTOM: + c := p.openComments() + x := &ast.BottomLit{Bottom: p.pos} + p.next() + return c.closeExpr(p, x) + + case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING: + c := p.openComments() + x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit} + p.next() + return c.closeExpr(p, x) + + case token.INTERPOLATION: + return p.parseInterpolation() + + case token.LPAREN: + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + lparen := p.pos + p.next() + p.exprLev++ + p.openList() + x := p.parseRHS() // types may be parenthesized: (some type) + p.closeList() + p.exprLev-- + rparen := p.expect(token.RPAREN) + return &ast.ParenExpr{ + Lparen: lparen, + X: x, + Rparen: rparen} + + default: + if p.tok.IsKeyword() { + return p.parseKeyIdent() + } + } + + // we have an error + c := p.openComments() + pos := p.pos + p.errorExpected(pos, "operand") + syncExpr(p) + return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos}) +} + +func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) { + if p.trace { + defer un(trace(p, "IndexOrSlice")) + } + + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + c.pos = 1 + + const N = 2 + lbrack := p.expect(token.LBRACK) + + p.exprLev++ + var index [N]ast.Expr + var colons [N - 1]token.Pos + if p.tok != token.COLON { + index[0] = p.parseRHS() + } + nColons := 0 + for p.tok == token.COLON && nColons < len(colons) { + colons[nColons] = p.pos + nColons++ + p.next() + if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF { + index[nColons] = p.parseRHS() + } + } + p.exprLev-- + rbrack := p.expect(token.RBRACK) + + if nColons > 0 { + return &ast.SliceExpr{ + X: x, + Lbrack: lbrack, + Low: index[0], 
+ High: index[1], + Rbrack: rbrack} + } + + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: index[0], + Rbrack: rbrack} +} + +func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) { + if p.trace { + defer un(trace(p, "CallOrConversion")) + } + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + + p.openList() + defer p.closeList() + + lparen := p.expect(token.LPAREN) + + p.exprLev++ + var list []ast.Expr + for p.tok != token.RPAREN && p.tok != token.EOF { + list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...) + if !p.atComma("argument list", token.RPAREN) { + break + } + p.next() + } + p.exprLev-- + rparen := p.expectClosing(token.RPAREN, "argument list") + + return &ast.CallExpr{ + Fun: fun, + Lparen: lparen, + Args: list, + Rparen: rparen} +} + +// TODO: inline this function in parseFieldList once we no longer user comment +// position information in parsing. +func (p *parser) consumeDeclComma() { + if p.atComma("struct literal", token.RBRACE, token.EOF) { + p.next() + } +} + +func (p *parser) parseFieldList() (list []ast.Decl) { + if p.trace { + defer un(trace(p, "FieldList")) + } + p.openList() + defer p.closeList() + + for p.tok != token.RBRACE && p.tok != token.EOF { + switch p.tok { + case token.ATTRIBUTE: + list = append(list, p.parseAttribute()) + p.consumeDeclComma() + + case token.ELLIPSIS: + c := p.openComments() + ellipsis := &ast.Ellipsis{Ellipsis: p.pos} + p.next() + c.closeNode(p, ellipsis) + list = append(list, ellipsis) + p.consumeDeclComma() + + default: + list = append(list, p.parseField()) + } + + // TODO: handle next comma here, after disallowing non-colon separator + // and we have eliminated the need comment positions. 
+ } + + return +} + +func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) { + if p.trace { + defer un(trace(p, "Field")) + } + + c := p.openComments() + + letPos := p.expect(token.LET) + if p.tok != token.IDENT { + c.closeNode(p, ident) + return nil, &ast.Ident{ + NamePos: letPos, + Name: "let", + } + } + defer func() { c.closeNode(p, decl) }() + + ident = p.parseIdent() + assign := p.expect(token.BIND) + expr := p.parseRHS() + + p.consumeDeclComma() + + return &ast.LetClause{ + Let: letPos, + Ident: ident, + Equal: assign, + Expr: expr, + }, nil +} + +func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) { + if p.trace { + defer un(trace(p, "Comprehension")) + } + + c := p.openComments() + defer func() { c.closeNode(p, decl) }() + + tok := p.tok + pos := p.pos + clauses, fc := p.parseComprehensionClauses(true) + if fc != nil { + ident = &ast.Ident{ + NamePos: pos, + Name: tok.String(), + } + fc.closeNode(p, ident) + return nil, ident + } + + sc := p.openComments() + expr := p.parseStruct() + sc.closeExpr(p, expr) + + if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF + p.next() + } + + return &ast.Comprehension{ + Clauses: clauses, + Value: expr, + }, nil +} + +func (p *parser) parseField() (decl ast.Decl) { + if p.trace { + defer un(trace(p, "Field")) + } + + c := p.openComments() + defer func() { c.closeNode(p, decl) }() + + pos := p.pos + + this := &ast.Field{Label: nil} + m := this + + tok := p.tok + + label, expr, decl, ok := p.parseLabel(false) + if decl != nil { + return decl + } + m.Label = label + + if !ok { + if expr == nil { + expr = p.parseRHS() + } + if a, ok := expr.(*ast.Alias); ok { + p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`) + p.consumeDeclComma() + return a + } + e := &ast.EmbedDecl{Expr: expr} + p.consumeDeclComma() + return e + } + + if p.tok == token.OPTION { + m.Optional = p.pos + p.next() + } + + // TODO: consider disallowing comprehensions with more than one 
label. + // This can be a bit awkward in some cases, but it would naturally + // enforce the proper style that a comprehension be defined in the + // smallest possible scope. + // allowComprehension = false + + switch p.tok { + case token.COLON, token.ISA: + case token.COMMA: + p.expectComma() // sync parser. + fallthrough + + case token.RBRACE, token.EOF: + if a, ok := expr.(*ast.Alias); ok { + p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`) + return a + } + switch tok { + case token.IDENT, token.LBRACK, token.LPAREN, + token.STRING, token.INTERPOLATION, + token.NULL, token.TRUE, token.FALSE, + token.FOR, token.IF, token.LET, token.IN: + return &ast.EmbedDecl{Expr: expr} + } + fallthrough + + default: + p.errorExpected(p.pos, "label or ':'") + return &ast.BadDecl{From: pos, To: p.pos} + } + + m.TokenPos = p.pos + m.Token = p.tok + if p.tok == token.ISA { + p.assertV0(p.pos, 2, 0, "'::'") + } + if p.tok != token.COLON && p.tok != token.ISA { + p.errorExpected(pos, "':' or '::'") + } + p.next() // : or :: + + for { + if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 { + p.errf(l.Pos(), "square bracket must have exactly one element") + } + + tok := p.tok + label, expr, _, ok := p.parseLabel(true) + if !ok || (p.tok != token.COLON && p.tok != token.ISA && p.tok != token.OPTION) { + if expr == nil { + expr = p.parseRHS() + } + m.Value = expr + break + } + field := &ast.Field{Label: label} + m.Value = &ast.StructLit{Elts: []ast.Decl{field}} + m = field + + if tok != token.LSS && p.tok == token.OPTION { + m.Optional = p.pos + p.next() + } + + m.TokenPos = p.pos + m.Token = p.tok + if p.tok == token.ISA { + p.assertV0(p.pos, 2, 0, "'::'") + } + if p.tok != token.COLON && p.tok != token.ISA { + if p.tok.IsLiteral() { + p.errf(p.pos, "expected ':' or '::'; found %s", p.lit) + } else { + p.errf(p.pos, "expected ':' or '::'; found %s", p.tok) + } + break + } + p.next() + } + + if attrs := p.parseAttributes(); attrs != nil { + m.Attrs = attrs 
+ } + + p.consumeDeclComma() + + return this +} + +func (p *parser) parseAttributes() (attrs []*ast.Attribute) { + p.openList() + for p.tok == token.ATTRIBUTE { + attrs = append(attrs, p.parseAttribute()) + } + p.closeList() + return attrs +} + +func (p *parser) parseAttribute() *ast.Attribute { + c := p.openComments() + a := &ast.Attribute{At: p.pos, Text: p.lit} + p.next() + c.closeNode(p, a) + return a +} + +func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) { + tok := p.tok + switch tok { + + case token.FOR, token.IF: + if rhs { + expr = p.parseExpr() + break + } + comp, ident := p.parseComprehension() + if comp != nil { + return nil, nil, comp, false + } + expr = ident + + case token.LET: + let, ident := p.parseLetDecl() + if let != nil { + return nil, nil, let, false + } + expr = ident + + case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN, + token.NULL, token.TRUE, token.FALSE, token.IN: + expr = p.parseExpr() + + case token.LBRACK: + expr = p.parseRHS() + switch x := expr.(type) { + case *ast.ListLit: + // Note: caller must verify this list is suitable as a label. + label, ok = x, true + } + } + + switch x := expr.(type) { + case *ast.BasicLit: + switch x.Kind { + case token.STRING, token.NULL, token.TRUE, token.FALSE: + // Keywords that represent operands. + + // Allowing keywords to be used as a labels should not interfere with + // generating good errors: any keyword can only appear on the RHS of a + // field (after a ':'), whereas labels always appear on the LHS. 
+ + label, ok = x, true + } + + case *ast.Ident: + if strings.HasPrefix(x.Name, "__") { + p.errf(x.NamePos, "identifiers starting with '__' are reserved") + } + + expr = p.parseAlias(x) + if a, ok := expr.(*ast.Alias); ok { + if _, ok = a.Expr.(ast.Label); !ok { + break + } + label = a + } else { + label = x + } + ok = true + + case ast.Label: + label, ok = x, true + } + return label, expr, nil, ok +} + +func (p *parser) parseStruct() (expr ast.Expr) { + lbrace := p.expect(token.LBRACE) + + if p.trace { + defer un(trace(p, "StructLit")) + } + + elts := p.parseStructBody() + rbrace := p.expectClosing(token.RBRACE, "struct literal") + return &ast.StructLit{ + Lbrace: lbrace, + Elts: elts, + Rbrace: rbrace, + } +} + +func (p *parser) parseStructBody() []ast.Decl { + if p.trace { + defer un(trace(p, "StructBody")) + } + + p.exprLev++ + var elts []ast.Decl + + // TODO: consider "stealing" non-lead comments. + // for _, cg := range p.comments.groups { + // if cg != nil { + // elts = append(elts, cg) + // } + // } + // p.comments.groups = p.comments.groups[:0] + + if p.tok != token.RBRACE { + elts = p.parseFieldList() + } + p.exprLev-- + + return elts +} + +// parseComprehensionClauses parses either new-style (first==true) +// or old-style (first==false). +// Should we now disallow keywords as identifiers? If not, we need to +// return a list of discovered labels as the alternative. +func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) { + // TODO: reuse Template spec, which is possible if it doesn't check the + // first is an identifier. 
+ + for { + switch p.tok { + case token.FOR: + c := p.openComments() + forPos := p.expect(token.FOR) + if first { + switch p.tok { + case token.COLON, token.ISA, token.BIND, token.OPTION, + token.COMMA, token.EOF: + return nil, c + } + } + + var key, value *ast.Ident + var colon token.Pos + value = p.parseIdent() + if p.tok == token.COMMA { + colon = p.expect(token.COMMA) + key = value + value = p.parseIdent() + } + c.pos = 4 + // params := p.parseParams(nil, ARROW) + clauses = append(clauses, c.closeClause(p, &ast.ForClause{ + For: forPos, + Key: key, + Colon: colon, + Value: value, + In: p.expect(token.IN), + Source: p.parseRHS(), + })) + + case token.IF: + c := p.openComments() + ifPos := p.expect(token.IF) + if first { + switch p.tok { + case token.COLON, token.ISA, token.BIND, token.OPTION, + token.COMMA, token.EOF: + return nil, c + } + } + + clauses = append(clauses, c.closeClause(p, &ast.IfClause{ + If: ifPos, + Condition: p.parseRHS(), + })) + + case token.LET: + c := p.openComments() + letPos := p.expect(token.LET) + + ident := p.parseIdent() + assign := p.expect(token.BIND) + expr := p.parseRHS() + + clauses = append(clauses, c.closeClause(p, &ast.LetClause{ + Let: letPos, + Ident: ident, + Equal: assign, + Expr: expr, + })) + + default: + return clauses, nil + } + if p.tok == token.COMMA { + p.next() + } + + first = false + } +} + +func (p *parser) parseList() (expr ast.Expr) { + lbrack := p.expect(token.LBRACK) + + if p.trace { + defer un(trace(p, "ListLiteral")) + } + + elts := p.parseListElements() + + if p.tok == token.ELLIPSIS { + ellipsis := &ast.Ellipsis{ + Ellipsis: p.pos, + } + elts = append(elts, ellipsis) + p.next() + if p.tok != token.COMMA && p.tok != token.RBRACK { + ellipsis.Type = p.parseRHS() + } + if p.atComma("list literal", token.RBRACK) { + p.next() + } + } + + rbrack := p.expectClosing(token.RBRACK, "list literal") + return &ast.ListLit{ + Lbrack: lbrack, + Elts: elts, + Rbrack: rbrack} +} + +func (p *parser) parseListElements() 
(list []ast.Expr) { + if p.trace { + defer un(trace(p, "ListElements")) + } + p.openList() + defer p.closeList() + + for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF { + expr, ok := p.parseListElement() + list = append(list, expr) + if !ok { + break + } + } + + return +} + +func (p *parser) parseListElement() (expr ast.Expr, ok bool) { + if p.trace { + defer un(trace(p, "ListElement")) + } + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + + switch p.tok { + case token.FOR, token.IF: + tok := p.tok + pos := p.pos + clauses, fc := p.parseComprehensionClauses(true) + if clauses != nil { + sc := p.openComments() + expr := p.parseStruct() + sc.closeExpr(p, expr) + + if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF + p.next() + } + + return &ast.Comprehension{ + Clauses: clauses, + Value: expr, + }, true + } + + expr = &ast.Ident{ + NamePos: pos, + Name: tok.String(), + } + fc.closeNode(p, expr) + + default: + expr = p.parseUnaryExpr() + } + + expr = p.parseBinaryExprTail(token.LowestPrec+1, expr) + expr = p.parseAlias(expr) + + // Enforce there is an explicit comma. We could also allow the + // omission of commas in lists, but this gives rise to some ambiguities + // with list comprehensions. + if p.tok == token.COMMA && p.lit != "," { + p.next() + // Allow missing comma for last element, though, to be compliant + // with JSON. + if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF { + return expr, false + } + p.errf(p.pos, "missing ',' before newline in list literal") + } else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) { + return expr, false + } + p.next() + + return expr, true +} + +// parseAlias turns an expression into an alias. 
+func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) { + if p.tok != token.BIND { + return lhs + } + pos := p.pos + p.next() + expr = p.parseRHS() + if expr == nil { + panic("empty return") + } + switch x := lhs.(type) { + case *ast.Ident: + return &ast.Alias{Ident: x, Equal: pos, Expr: expr} + } + p.errf(p.pos, "expected identifier for alias") + return expr +} + +// checkExpr checks that x is an expression (and not a type). +func (p *parser) checkExpr(x ast.Expr) ast.Expr { + switch unparen(x).(type) { + case *ast.BadExpr: + case *ast.BottomLit: + case *ast.Ident: + case *ast.BasicLit: + case *ast.Interpolation: + case *ast.StructLit: + case *ast.ListLit: + case *ast.ParenExpr: + panic("unreachable") + case *ast.SelectorExpr: + case *ast.IndexExpr: + case *ast.SliceExpr: + case *ast.CallExpr: + case *ast.UnaryExpr: + case *ast.BinaryExpr: + default: + // all other nodes are not proper expressions + p.errorExpected(x.Pos(), "expression") + x = &ast.BadExpr{ + From: x.Pos(), To: p.safePos(x.End()), + } + } + return x +} + +// If x is of the form (T), unparen returns unparen(T), otherwise it returns x. +func unparen(x ast.Expr) ast.Expr { + if p, isParen := x.(*ast.ParenExpr); isParen { + x = unparen(p.X) + } + return x +} + +// If lhs is set and the result is an identifier, it is not resolved. 
+func (p *parser) parsePrimaryExpr() ast.Expr { + if p.trace { + defer un(trace(p, "PrimaryExpr")) + } + + return p.parsePrimaryExprTail(p.parseOperand()) +} + +func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr { + x := operand +L: + for { + switch p.tok { + case token.PERIOD: + c := p.openComments() + c.pos = 1 + p.next() + switch p.tok { + case token.IDENT: + x = &ast.SelectorExpr{ + X: p.checkExpr(x), + Sel: p.parseIdent(), + } + case token.STRING: + if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) { + str := &ast.BasicLit{ + ValuePos: p.pos, + Kind: token.STRING, + Value: p.lit, + } + p.next() + x = &ast.SelectorExpr{ + X: p.checkExpr(x), + Sel: str, + } + break + } + fallthrough + default: + pos := p.pos + p.errorExpected(pos, "selector") + p.next() // make progress + x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}} + } + c.closeNode(p, x) + case token.LBRACK: + x = p.parseIndexOrSlice(p.checkExpr(x)) + case token.LPAREN: + x = p.parseCallOrConversion(p.checkExpr(x)) + default: + break L + } + } + + return x +} + +// If lhs is set and the result is an identifier, it is not resolved. 
+func (p *parser) parseUnaryExpr() ast.Expr { + if p.trace { + defer un(trace(p, "UnaryExpr")) + } + + switch p.tok { + case token.ADD, token.SUB, token.NOT, token.MUL, + token.LSS, token.LEQ, token.GEQ, token.GTR, + token.NEQ, token.MAT, token.NMAT: + pos, op := p.pos, p.tok + c := p.openComments() + p.next() + return c.closeExpr(p, &ast.UnaryExpr{ + OpPos: pos, + Op: op, + X: p.checkExpr(p.parseUnaryExpr()), + }) + } + + return p.parsePrimaryExpr() +} + +func (p *parser) tokPrec() (token.Token, int) { + tok := p.tok + if tok == token.IDENT { + switch p.lit { + case "quo": + return token.IQUO, 7 + case "rem": + return token.IREM, 7 + case "div": + return token.IDIV, 7 + case "mod": + return token.IMOD, 7 + default: + return tok, 0 + } + } + return tok, tok.Precedence() +} + +// If lhs is set and the result is an identifier, it is not resolved. +func (p *parser) parseBinaryExpr(prec1 int) ast.Expr { + if p.trace { + defer un(trace(p, "BinaryExpr")) + } + p.openList() + defer p.closeList() + + return p.parseBinaryExprTail(prec1, p.parseUnaryExpr()) +} + +func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr { + for { + op, prec := p.tokPrec() + if prec < prec1 { + return x + } + c := p.openComments() + c.pos = 1 + pos := p.expect(p.tok) + x = c.closeExpr(p, &ast.BinaryExpr{ + X: p.checkExpr(x), + OpPos: pos, + Op: op, + // Treat nested expressions as RHS. 
+ Y: p.checkExpr(p.parseBinaryExpr(prec + 1))}) + } +} + +func (p *parser) parseInterpolation() (expr ast.Expr) { + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + + p.openList() + defer p.closeList() + + cc := p.openComments() + + lit := p.lit + pos := p.pos + p.next() + last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit} + exprs := []ast.Expr{last} + + for p.tok == token.LPAREN { + c.pos = 1 + p.expect(token.LPAREN) + cc.closeExpr(p, last) + + exprs = append(exprs, p.parseRHS()) + + cc = p.openComments() + if p.tok != token.RPAREN { + p.errf(p.pos, "expected ')' for string interpolation") + } + lit = p.scanner.ResumeInterpolation() + pos = p.pos + p.next() + last = &ast.BasicLit{ + ValuePos: pos, + Kind: token.STRING, + Value: lit, + } + exprs = append(exprs, last) + } + cc.closeExpr(p, last) + return &ast.Interpolation{Elts: exprs} +} + +// Callers must check the result (using checkExpr), depending on context. +func (p *parser) parseExpr() (expr ast.Expr) { + if p.trace { + defer un(trace(p, "Expression")) + } + + c := p.openComments() + defer func() { c.closeExpr(p, expr) }() + + return p.parseBinaryExpr(token.LowestPrec + 1) +} + +func (p *parser) parseRHS() ast.Expr { + x := p.checkExpr(p.parseExpr()) + return x +} + +// ---------------------------------------------------------------------------- +// Declarations + +func isValidImport(lit string) bool { + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal + if p := strings.LastIndexByte(s, ':'); p >= 0 { + s = s[:p] + } + for _, r := range s { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return false + } + } + return s != "" +} + +func (p *parser) parseImportSpec(_ int) *ast.ImportSpec { + if p.trace { + defer un(trace(p, "ImportSpec")) + } + + c := p.openComments() + + var ident *ast.Ident + if p.tok == token.IDENT { + ident = p.parseIdent() 
+ } + + pos := p.pos + var path string + if p.tok == token.STRING { + path = p.lit + if !isValidImport(path) { + p.errf(pos, "invalid import path: %s", path) + } + p.next() + p.expectComma() // call before accessing p.linecomment + } else { + p.expect(token.STRING) // use expect() error handling + if p.tok == token.COMMA { + p.expectComma() // call before accessing p.linecomment + } + } + // collect imports + spec := &ast.ImportSpec{ + Name: ident, + Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path}, + } + c.closeNode(p, spec) + p.imports = append(p.imports, spec) + + return spec +} + +func (p *parser) parseImports() *ast.ImportDecl { + if p.trace { + defer un(trace(p, "Imports")) + } + c := p.openComments() + + ident := p.parseIdent() + var lparen, rparen token.Pos + var list []*ast.ImportSpec + if p.tok == token.LPAREN { + lparen = p.pos + p.next() + p.openList() + for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ { + list = append(list, p.parseImportSpec(iota)) + } + p.closeList() + rparen = p.expect(token.RPAREN) + p.expectComma() + } else { + list = append(list, p.parseImportSpec(0)) + } + + d := &ast.ImportDecl{ + Import: ident.Pos(), + Lparen: lparen, + Specs: list, + Rparen: rparen, + } + c.closeNode(p, d) + return d +} + +// ---------------------------------------------------------------------------- +// Source files + +func (p *parser) parseFile() *ast.File { + if p.trace { + defer un(trace(p, "File")) + } + + c := p.comments + + // Don't bother parsing the rest if we had errors scanning the first + // Likely not a Go source file at all. + if p.errors != nil { + return nil + } + p.openList() + + var decls []ast.Decl + + for p.tok == token.ATTRIBUTE { + decls = append(decls, p.parseAttribute()) + p.consumeDeclComma() + } + + // The package clause is not a declaration: it does not appear in any + // scope. 
+ if p.tok == token.IDENT && p.lit == "package" { + c := p.openComments() + + pos := p.pos + var name *ast.Ident + p.expect(token.IDENT) + name = p.parseIdent() + if name.Name == "_" && p.mode&declarationErrorsMode != 0 { + p.errf(p.pos, "invalid package name _") + } + + pkg := &ast.Package{ + PackagePos: pos, + Name: name, + } + decls = append(decls, pkg) + p.expectComma() + c.closeNode(p, pkg) + } + + for p.tok == token.ATTRIBUTE { + decls = append(decls, p.parseAttribute()) + p.consumeDeclComma() + } + + if p.mode&packageClauseOnlyMode == 0 { + // import decls + for p.tok == token.IDENT && p.lit == "import" { + decls = append(decls, p.parseImports()) + } + + if p.mode&importsOnlyMode == 0 { + // rest of package decls + // TODO: loop and allow multiple expressions. + decls = append(decls, p.parseFieldList()...) + p.expect(token.EOF) + } + } + p.closeList() + + f := &ast.File{ + Imports: p.imports, + Decls: decls, + } + c.closeNode(p, f) + return f +} diff --git a/vendor/cuelang.org/go/cue/path.go b/vendor/cuelang.org/go/cue/path.go new file mode 100644 index 0000000000..8054107848 --- /dev/null +++ b/vendor/cuelang.org/go/cue/path.go @@ -0,0 +1,520 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cue + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/astinternal" + "cuelang.org/go/internal/core/adt" + "github.com/cockroachdb/apd/v2" +) + +// A Selector is a component of a path. +type Selector struct { + sel selector +} + +// String reports the CUE representation of a selector. +func (sel Selector) String() string { + return sel.sel.String() +} + +// IsString reports whether sel is a regular label type. +func (sel Selector) IsString() bool { + return sel.sel.kind() == adt.StringLabel +} + +// IsDefinition reports whether sel is a non-hidden definition label type. +func (sel Selector) IsDefinition() bool { + return sel.sel.kind() == adt.DefinitionLabel +} + +// PkgPath reports the package path associated with a hidden label or "" if +// this is not a hidden label. +func (sel Selector) PkgPath() string { + h, _ := sel.sel.(scopedSelector) + return h.pkg +} + +var ( + + // AnyDefinition can be used to ask for any definition. + // + // In paths it is used to select constraints that apply to all elements. + // AnyDefinition = anyDefinition + anyDefinition = Selector{sel: anySelector(adt.AnyDefinition)} + + // AnyIndex can be used to ask for any index. + // + // In paths it is used to select constraints that apply to all elements. + AnyIndex = anyIndex + anyIndex = Selector{sel: anySelector(adt.AnyIndex)} + + // AnyString can be used to ask for any regular string field. + // + // In paths it is used to select constraints that apply to all elements. + AnyString = anyString + anyString = Selector{sel: anySelector(adt.AnyString)} +) + +// Optional converts sel into an optional equivalent. +// foo -> foo? 
+func (sel Selector) Optional() Selector { + return wrapOptional(sel) +} + +type selector interface { + String() string + + feature(ctx adt.Runtime) adt.Feature + kind() adt.FeatureType + optional() bool +} + +// A Path is series of selectors to query a CUE value. +type Path struct { + path []Selector +} + +// MakePath creates a Path from a sequence of selectors. +func MakePath(selectors ...Selector) Path { + return Path{path: selectors} +} + +// pathToString is a utility function for creating debugging info. +func pathToStrings(p Path) (a []string) { + for _, sel := range p.Selectors() { + a = append(a, sel.String()) + } + return a +} + +// ParsePath parses a CUE expression into a Path. Any error resulting from +// this conversion can be obtained by calling Err on the result. +// +// Unlike with normal CUE expressions, the first element of the path may be +// a string literal. +// +// A path may not contain hidden fields. To create a path with hidden fields, +// use MakePath and Ident. +func ParsePath(s string) Path { + if s == "" { + return Path{} + } + expr, err := parser.ParseExpr("", s) + if err != nil { + return MakePath(Selector{pathError{errors.Promote(err, "invalid path")}}) + } + + p := Path{path: toSelectors(expr)} + for _, sel := range p.path { + if sel.sel.kind().IsHidden() { + return MakePath(Selector{pathError{errors.Newf(token.NoPos, + "invalid path: hidden fields not allowed in path %s", s)}}) + } + } + return p +} + +// Selectors reports the individual selectors of a path. +func (p Path) Selectors() []Selector { + return p.path +} + +// String reports the CUE representation of p. +func (p Path) String() string { + if err := p.Err(); err != nil { + return "_|_" + } + + b := &strings.Builder{} + for i, sel := range p.path { + x := sel.sel + // TODO: use '.' in all cases, once supported. 
+ _, isAny := x.(anySelector) + switch { + case x.kind() == adt.IntLabel && !isAny: + b.WriteByte('[') + b.WriteString(x.String()) + b.WriteByte(']') + continue + case i > 0: + b.WriteByte('.') + } + + b.WriteString(x.String()) + } + return b.String() +} + +// Optional returns the optional form of a Path. For instance, +// foo.bar --> foo?.bar? +// +func (p Path) Optional() Path { + q := make([]Selector, 0, len(p.path)) + for _, s := range p.path { + q = appendSelector(q, wrapOptional(s)) + } + return Path{path: q} +} + +func toSelectors(expr ast.Expr) []Selector { + switch x := expr.(type) { + case *ast.Ident: + return []Selector{Label(x)} + + case *ast.BasicLit: + return []Selector{basicLitSelector(x)} + + case *ast.IndexExpr: + a := toSelectors(x.X) + var sel Selector + if b, ok := x.Index.(*ast.BasicLit); !ok { + sel = Selector{pathError{ + errors.Newf(token.NoPos, "non-constant expression %s", + astinternal.DebugStr(x.Index))}} + } else { + sel = basicLitSelector(b) + } + return appendSelector(a, sel) + + case *ast.SelectorExpr: + a := toSelectors(x.X) + return appendSelector(a, Label(x.Sel)) + + default: + return []Selector{{pathError{ + errors.Newf(token.NoPos, "invalid label %s ", astinternal.DebugStr(x)), + }}} + } +} + +// appendSelector is like append(a, sel), except that it collects errors +// in a one-element slice. 
+func appendSelector(a []Selector, sel Selector) []Selector { + err, isErr := sel.sel.(pathError) + if len(a) == 1 { + if p, ok := a[0].sel.(pathError); ok { + if isErr { + p.Error = errors.Append(p.Error, err.Error) + } + return a + } + } + if isErr { + return []Selector{sel} + } + return append(a, sel) +} + +func basicLitSelector(b *ast.BasicLit) Selector { + switch b.Kind { + case token.INT: + var n literal.NumInfo + if err := literal.ParseNum(b.Value, &n); err != nil { + return Selector{pathError{ + errors.Newf(token.NoPos, "invalid string index %s", b.Value), + }} + } + var d apd.Decimal + _ = n.Decimal(&d) + i, err := d.Int64() + if err != nil { + return Selector{pathError{ + errors.Newf(token.NoPos, "integer %s out of range", b.Value), + }} + } + return Index(int(i)) + + case token.STRING: + info, _, _, _ := literal.ParseQuotes(b.Value, b.Value) + if !info.IsDouble() { + return Selector{pathError{ + errors.Newf(token.NoPos, "invalid string index %s", b.Value)}} + } + s, _ := literal.Unquote(b.Value) + return Selector{stringSelector(s)} + + default: + return Selector{pathError{ + errors.Newf(token.NoPos, "invalid literal %s", b.Value), + }} + } +} + +// Label converts an AST label to a Selector. +func Label(label ast.Label) Selector { + switch x := label.(type) { + case *ast.Ident: + switch s := x.Name; { + case strings.HasPrefix(s, "_"): + // TODO: extract package from a bound identifier. + return Selector{pathError{errors.Newf(token.NoPos, + "invalid path: hidden label %s not allowed", s), + }} + case strings.HasPrefix(s, "#"): + return Selector{definitionSelector(x.Name)} + default: + return Selector{stringSelector(x.Name)} + } + + case *ast.BasicLit: + return basicLitSelector(x) + + default: + return Selector{pathError{ + errors.Newf(token.NoPos, "invalid label %s ", astinternal.DebugStr(x)), + }} + } +} + +// Err reports errors that occurred when generating the path. 
+func (p Path) Err() error { + var errs errors.Error + for _, x := range p.path { + if err, ok := x.sel.(pathError); ok { + errs = errors.Append(errs, err.Error) + } + } + return errs +} + +func isHiddenOrDefinition(s string) bool { + return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_") +} + +// Hid returns a selector for a hidden field. It panics is pkg is empty. +// Hidden fields are scoped by package, and pkg indicates for which package +// the hidden field must apply.For anonymous packages, it must be set to "_". +func Hid(name, pkg string) Selector { + if !ast.IsValidIdent(name) { + panic(fmt.Sprintf("invalid identifier %s", name)) + } + if !strings.HasPrefix(name, "_") { + panic(fmt.Sprintf("%s is not a hidden field identifier", name)) + } + if pkg == "" { + panic(fmt.Sprintf("missing package for hidden identifier %s", name)) + } + return Selector{scopedSelector{name, pkg}} +} + +type scopedSelector struct { + name, pkg string +} + +// String returns the CUE representation of the definition. +func (s scopedSelector) String() string { + return s.name +} +func (scopedSelector) optional() bool { return false } + +func (s scopedSelector) kind() adt.FeatureType { + switch { + case strings.HasPrefix(s.name, "#"): + return adt.DefinitionLabel + case strings.HasPrefix(s.name, "_#"): + return adt.HiddenDefinitionLabel + case strings.HasPrefix(s.name, "_"): + return adt.HiddenLabel + default: + return adt.StringLabel + } +} + +func (s scopedSelector) feature(r adt.Runtime) adt.Feature { + return adt.MakeIdentLabel(r, s.name, s.pkg) +} + +// A Def marks a string as a definition label. An # will be added if a string is +// not prefixed with a #. It will panic if s cannot be written as a valid +// identifier. 
+func Def(s string) Selector { + if !strings.HasPrefix(s, "#") && !strings.HasPrefix(s, "_#") { + s = "#" + s + } + if !ast.IsValidIdent(s) { + panic(fmt.Sprintf("invalid definition %s", s)) + } + return Selector{definitionSelector(s)} +} + +type definitionSelector string + +// String returns the CUE representation of the definition. +func (d definitionSelector) String() string { + return string(d) +} + +func (d definitionSelector) optional() bool { return false } + +func (d definitionSelector) kind() adt.FeatureType { + return adt.DefinitionLabel +} + +func (d definitionSelector) feature(r adt.Runtime) adt.Feature { + return adt.MakeIdentLabel(r, string(d), "") +} + +// A Str is a CUE string label. Definition selectors are defined with Def. +func Str(s string) Selector { + return Selector{stringSelector(s)} +} + +type stringSelector string + +func (s stringSelector) String() string { + str := string(s) + if isHiddenOrDefinition(str) || !ast.IsValidIdent(str) { + return literal.Label.Quote(str) + } + return str +} + +func (s stringSelector) optional() bool { return false } +func (s stringSelector) kind() adt.FeatureType { return adt.StringLabel } + +func (s stringSelector) feature(r adt.Runtime) adt.Feature { + return adt.MakeStringLabel(r, string(s)) +} + +// An Index selects a list element by index. +func Index(x int) Selector { + f, err := adt.MakeLabel(nil, int64(x), adt.IntLabel) + if err != nil { + return Selector{pathError{err}} + } + return Selector{indexSelector(f)} +} + +type indexSelector adt.Feature + +func (s indexSelector) String() string { + return strconv.Itoa(adt.Feature(s).Index()) +} + +func (s indexSelector) kind() adt.FeatureType { return adt.IntLabel } +func (s indexSelector) optional() bool { return false } + +func (s indexSelector) feature(r adt.Runtime) adt.Feature { + return adt.Feature(s) +} + +// an anySelector represents a wildcard option of a particular type. 
+type anySelector adt.Feature + +func (s anySelector) String() string { return "[_]" } +func (s anySelector) optional() bool { return true } +func (s anySelector) kind() adt.FeatureType { return adt.Feature(s).Typ() } + +func (s anySelector) feature(r adt.Runtime) adt.Feature { + return adt.Feature(s) +} + +// TODO: allow import paths to be represented? +// +// // ImportPath defines a lookup at the root of an instance. It must be the first +// // element of a Path. +// func ImportPath(s string) Selector { +// return importSelector(s) +// } +type optionalSelector struct { + selector +} + +func wrapOptional(sel Selector) Selector { + if !sel.sel.optional() { + sel = Selector{optionalSelector{sel.sel}} + } + return sel +} + +// func isOptional(sel selector) bool { +// _, ok := sel.(optionalSelector) +// return ok +// } + +func (s optionalSelector) optional() bool { return true } + +func (s optionalSelector) String() string { + return s.selector.String() + "?" +} + +// TODO: allow looking up in parent scopes? + +// // Parent returns a Selector for looking up in the parent of a current node. +// // Parent selectors may only occur at the start of a Path. 
+// func Parent() Selector { +// return parentSelector{} +// } + +// type parentSelector struct{} + +// func (p parentSelector) String() string { return "__up" } +// func (p parentSelector) feature(r adt.Runtime) adt.Feature { +// return adt.InvalidLabel +// } + +type pathError struct { + errors.Error +} + +func (p pathError) String() string { return "" } +func (p pathError) optional() bool { return false } +func (p pathError) kind() adt.FeatureType { return 0 } +func (p pathError) feature(r adt.Runtime) adt.Feature { + return adt.InvalidLabel +} + +func valueToSel(v adt.Value) Selector { + switch x := adt.Unwrap(v).(type) { + case *adt.Num: + i, err := x.X.Int64() + if err != nil { + return Selector{&pathError{errors.Promote(err, "invalid number")}} + } + return Index(int(i)) + case *adt.String: + return Str(x.Str) + default: + return Selector{pathError{errors.Newf(token.NoPos, "dynamic selector")}} + } +} + +func featureToSel(f adt.Feature, r adt.Runtime) Selector { + switch f.Typ() { + case adt.StringLabel: + return Str(f.StringValue(r)) + case adt.IntLabel: + return Index(f.Index()) + case adt.DefinitionLabel: + return Def(f.IdentString(r)) + case adt.HiddenLabel, adt.HiddenDefinitionLabel: + ident := f.IdentString(r) + pkg := f.PkgID(r) + return Hid(ident, pkg) + } + return Selector{pathError{ + errors.Newf(token.NoPos, "unexpected feature type %v", f.Typ()), + }} +} diff --git a/vendor/cuelang.org/go/cue/query.go b/vendor/cuelang.org/go/cue/query.go new file mode 100644 index 0000000000..af046ee86f --- /dev/null +++ b/vendor/cuelang.org/go/cue/query.go @@ -0,0 +1,84 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/internal/core/adt" +) + +// This file contains query-related code. + +// getScopePrefix finds the Vertex that exists in v for the longest prefix of p. +// +// It is used to make the parent scopes visible when resolving expressions. +func getScopePrefix(v Value, p Path) Value { + for _, sel := range p.Selectors() { + w := v.LookupPath(MakePath(sel)) + if !w.Exists() { + break + } + v = w + } + return v +} + +// LookupPath reports the value for path p relative to v. +func (v Value) LookupPath(p Path) Value { + if v.v == nil { + return Value{} + } + n := v.v + parent := v.parent_ + ctx := v.ctx() + +outer: + for _, sel := range p.path { + f := sel.sel.feature(v.idx) + for _, a := range n.Arcs { + if a.Label == f { + parent = linkParent(parent, n, a) + n = a + continue outer + } + } + if sel.sel.optional() { + x := &adt.Vertex{ + Parent: n, + Label: sel.sel.feature(ctx), + } + n.MatchAndInsert(ctx, x) + if len(x.Conjuncts) > 0 { + x.Finalize(ctx) + parent = linkParent(parent, n, x) + n = x + continue + } + } + + var x *adt.Bottom + if err, ok := sel.sel.(pathError); ok { + x = &adt.Bottom{Err: err.Error} + } else { + x = mkErr(v.idx, n, adt.EvalError, "field not found: %v", sel.sel) + if n.Accept(ctx, f) { + x.Code = adt.IncompleteError + } + x.NotExists = true + } + v := makeValue(v.idx, n, parent) + return newErrValue(v, x) + } + return makeValue(v.idx, n, parent) +} diff --git a/vendor/cuelang.org/go/cue/scanner/fuzz.go b/vendor/cuelang.org/go/cue/scanner/fuzz.go new file mode 100644 index 
0000000000..376a57e419 --- /dev/null +++ b/vendor/cuelang.org/go/cue/scanner/fuzz.go @@ -0,0 +1,40 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build gofuzz +// +build gofuzz + +package scanner + +import ( + "cuelang.org/go/cue/token" +) + +func Fuzz(b []byte) int { + retCode := 1 + eh := func(_ token.Pos, msg string, args []interface{}) { + retCode = 0 + } + + var s Scanner + s.Init(token.NewFile("", 1, len(b)), b, eh, ScanComments) + + for { + _, tok, _ := s.Scan() + if tok == token.EOF { + break + } + } + return retCode +} diff --git a/vendor/cuelang.org/go/cue/scanner/scanner.go b/vendor/cuelang.org/go/cue/scanner/scanner.go new file mode 100644 index 0000000000..84b3643d32 --- /dev/null +++ b/vendor/cuelang.org/go/cue/scanner/scanner.go @@ -0,0 +1,991 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package scanner implements a scanner for CUE source text. 
It takes a []byte +// as source which can then be tokenized through repeated calls to the Scan +// method. +package scanner // import "cuelang.org/go/cue/scanner" + +import ( + "bytes" + "fmt" + "path/filepath" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "cuelang.org/go/cue/token" +) + +// An ErrorHandler is a generic error handler used throughout CUE packages. +// +// The position points to the beginning of the offending value. +type ErrorHandler func(pos token.Pos, msg string, args []interface{}) + +// A Scanner holds the Scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + errh ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + linesSinceLast int + spacesSinceLast int + insertEOL bool // insert a comma before next newline + + quoteStack []quoteInfo + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +type quoteInfo struct { + char rune + numChar int + numHash int +} + +const bom = 0xFEFF // byte order mark, only permitted as very first character + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. 
+func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.errf(s.offset, "illegal character NUL") + case r >= utf8.RuneSelf: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.errf(s.offset, "illegal UTF-8 encoding") + } else if r == bom && s.offset > 0 { + s.errf(s.offset, "illegal byte order mark") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A Mode value is a set of flags (or 0). +// They control scanner behavior. +type Mode uint + +// These constants are options to the Init function. +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens + dontInsertCommas // do not automatically insert commas - for testing only +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +func (s *Scanner) Init(file *token.File, src []byte, eh ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.errh = eh + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.insertEOL = false + s.ErrorCount = 0 + + s.next() + if s.ch == bom { + s.next() // ignore BOM at file beginning + } +} + +func (s *Scanner) errf(offs int, msg string, args ...interface{}) { + if s.errh != nil { + s.errh(s.file.Pos(offs, 0), msg, args) + } + s.ErrorCount++ +} + +var prefix = []byte("//line ") + +func (s *Scanner) interpretLineComment(text []byte) { + if bytes.HasPrefix(text, prefix) { + // get filename and line number, if any + if i := bytes.LastIndex(text, []byte{':'}); i > 0 { + if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 { + // valid //line filename:line comment + filename := string(bytes.TrimSpace(text[len(prefix):i])) + if filename != "" { + filename = filepath.Clean(filename) + if !filepath.IsAbs(filename) { + // make filename relative to current directory + filename = filepath.Join(s.dir, filename) + } + } + // update scanner position + s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line + } + } + } +} + +func (s *Scanner) scanComment() string { + // initial '/' already consumed; s.ch == '/' || s.ch == '*' + offs := s.offset - 1 // position of initial '/' + hasCR := false + + if s.ch == '/' { + //-style comment + s.next() + for s.ch != '\n' && s.ch >= 0 { + if s.ch == '\r' { + hasCR = true + } + s.next() + } + if offs == s.lineOffset { + // comment starts at the beginning of the current line + s.interpretLineComment(s.src[offs:s.offset]) + } + goto exit + } + + s.errf(offs, "comment not terminated") + +exit: + lit := s.src[offs:s.offset] + if hasCR { + // TODO: preserve /r/n + lit = stripCR(lit) + } + + return string(lit) +} + +func (s *Scanner) findLineEnd() bool 
{ + // initial '/' already consumed + + defer func(offs int) { + // reset scanner state to where it was upon calling findLineEnd + s.ch = '/' + s.offset = offs + s.rdOffset = offs + 1 + s.next() // consume initial '/' again + }(s.offset - 1) + + // read ahead until a newline, EOF, or non-comment token is found + for s.ch == '/' || s.ch == '*' { + if s.ch == '/' { + //-style comment always contains a newline + return true + } + /*-style comment: look for newline */ + s.next() + for s.ch >= 0 { + ch := s.ch + if ch == '\n' { + return true + } + s.next() + if ch == '*' && s.ch == '/' { + s.next() + break + } + } + s.skipWhitespace(0) // s.insertSemi is set + if s.ch < 0 || s.ch == '\n' { + return true + } + if s.ch != '/' { + // non-comment token + return false + } + s.next() // consume '/' + } + + return false +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + // TODO(mpvl): Is this correct? + return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +func (s *Scanner) scanFieldIdentifier() string { + offs := s.offset + if s.ch == '_' { + s.next() + } + if s.ch == '#' { + s.next() + // TODO: remove this block to allow # + if isDigit(s.ch) { + return string(s.src[offs:s.offset]) + } + } + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isExtendedIdent(r rune) bool { + return strings.IndexRune("-_#$%. 
", r) >= 0 +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case ch == '_': + return 0 + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func (s *Scanner) scanMantissa(base int) { + var last rune + for digitVal(s.ch) < base { + if last == '_' && s.ch == '_' { + s.errf(s.offset, "illegal '_' in number") + } + last = s.ch + s.next() + } + if last == '_' { + s.errf(s.offset-1, "illegal '_' in number") + } +} + +func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) { + // digitVal(s.ch) < 10 + offs := s.offset + tok := token.INT + + if seenDecimalPoint { + offs-- + tok = token.FLOAT + s.scanMantissa(10) + goto exponent + } + + if s.ch == '0' { + // int or float + offs := s.offset + s.next() + if s.ch == 'x' || s.ch == 'X' { + // hexadecimal int + s.next() + s.scanMantissa(16) + if s.offset-offs <= 2 { + // only scanned "0x" or "0X" + s.errf(offs, "illegal hexadecimal number") + } + } else if s.ch == 'b' { + // binary int + s.next() + s.scanMantissa(2) + if s.offset-offs <= 2 { + // only scanned "0b" + s.errf(offs, "illegal binary number") + } + } else if s.ch == 'o' { + // octal int + s.next() + s.scanMantissa(8) + if s.offset-offs <= 2 { + // only scanned "0o" + s.errf(offs, "illegal octal number") + } + } else { + // 0 or float + seenDigits := false + if s.ch >= '0' && s.ch <= '9' { + seenDigits = true + s.scanMantissa(10) + } + if s.ch == '.' || s.ch == 'e' || s.ch == 'E' { + goto fraction + } + if seenDigits { + // integer other than 0 may not start with 0 + s.errf(offs, "illegal integer number") + } + } + goto exit + } + + // decimal int or float + s.scanMantissa(10) + + // TODO: allow 3h4s, etc. + // switch s.ch { + // case 'h', 'm', 's', "µ"[0], 'u', 'n': + // } + +fraction: + if s.ch == '.' { + if p := s.offset + 1; p < len(s.src) && s.src[p] == '.' 
{ + // interpret dot as part of a range. + goto exit + } + tok = token.FLOAT + s.next() + s.scanMantissa(10) + } + +exponent: + switch s.ch { + case 'K', 'M', 'G', 'T', 'P': + tok = token.INT // TODO: Or should we allow this to be a float? + s.next() + if s.ch == 'i' { + s.next() + } + goto exit + } + + if s.ch == 'e' || s.ch == 'E' { + tok = token.FLOAT + s.next() + if s.ch == '-' || s.ch == '+' { + s.next() + } + s.scanMantissa(10) + } + +exit: + return tok, string(s.src[offs:s.offset]) +} + +// scanEscape parses an escape sequence where rune is the accepted +// escaped quote. In case of a syntax error, it stops at the offending +// character (without consuming it) and returns false. Otherwise +// it returns true. +// +// Must be compliant with https://tools.ietf.org/html/rfc4627. +func (s *Scanner) scanEscape(quote quoteInfo) (ok, interpolation bool) { + for i := 0; i < quote.numHash; i++ { + if s.ch != '#' { + return true, false + } + s.next() + } + + offs := s.offset + + var n int + var base, max uint32 + switch s.ch { + case '(': + return true, true + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '/', quote.char: + s.next() + return true, false + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + s.next() + n, base, max = 2, 16, 255 + case 'u': + s.next() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + s.next() + n, base, max = 8, 16, unicode.MaxRune + default: + msg := "unknown escape sequence" + if s.ch < 0 { + msg = "escape sequence not terminated" + } + s.errf(offs, msg) + return false, false + } + + var x uint32 + for n > 0 { + d := uint32(digitVal(s.ch)) + if d >= base { + if s.ch < 0 { + s.errf(s.offset, "escape sequence not terminated") + } else { + s.errf(s.offset, "illegal character %#U in escape sequence", s.ch) + } + return false, false + } + x = x*base + d + s.next() + n-- + } + + // TODO: this is valid JSON, so remove, but normalize and report an error + // if for unmatched surrogate pairs . 
+ if x > max { + s.errf(offs, "escape sequence is invalid Unicode code point") + return false, false + } + + return true, false +} + +func (s *Scanner) scanString(offs int, quote quoteInfo) (token.Token, string) { + // ", """, ', or ''' opening already consumed + + tok := token.STRING + + hasCR := false + extra := 0 + for { + ch := s.ch + if (quote.numChar != 3 && ch == '\n') || ch < 0 { + s.errf(offs, "string literal not terminated") + lit := s.src[offs:s.offset] + if hasCR { + lit = stripCR(lit) + } + return tok, string(lit) + } + + s.next() + ch, ok := s.consumeStringClose(ch, quote) + if ok { + break + } + if ch == '\r' && quote.numChar == 3 { + hasCR = true + } + if ch == '\\' { + if _, interpolation := s.scanEscape(quote); interpolation { + tok = token.INTERPOLATION + extra = 1 + s.quoteStack = append(s.quoteStack, quote) + break + } + } + } + lit := s.src[offs : s.offset+extra] + if hasCR { + lit = stripCR(lit) + } + return tok, string(lit) +} + +func (s *Scanner) consumeQuotes(quote rune, max int) (next rune, n int) { + for ; n < max; n++ { + if s.ch != quote { + return s.ch, n + } + s.next() + } + return s.ch, n +} + +func (s *Scanner) consumeStringClose(ch rune, quote quoteInfo) (next rune, atEnd bool) { + if quote.char != ch { + return ch, false + } + numChar := quote.numChar + n := numChar + quote.numHash + want := quote.char + for i := 1; i < n; i++ { + if i == numChar { + want = '#' + } + if want != s.ch { + return ch, false + } + ch = s.ch + s.next() + } + return s.ch, true +} + +func (s *Scanner) checkHashCount(offs int, quote quoteInfo) { + for i := 0; i < quote.numHash; i++ { + if s.ch != '#' { + s.errf(offs, "string literal not terminated") + return + } + s.next() + } +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +// scanAttribute scans aa full attribute of the form @foo(str). 
An attribute +// is a lexical entry and as such whitespace is treated as normal characters +// within the attribute. +func (s *Scanner) scanAttribute() (tok token.Token, lit string) { + offs := s.offset - 1 // @ already consumed + + s.scanIdentifier() + + if _, tok, _ := s.Scan(); tok == token.LPAREN { + s.scanAttributeTokens(token.RPAREN) + } else { + s.errf(s.offset, "invalid attribute: expected '('") + } + return token.ATTRIBUTE, string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanAttributeTokens(close token.Token) { + for { + switch _, tok, _ := s.Scan(); tok { + case close: + return + case token.EOF: + s.errf(s.offset, "attribute missing '%s'", close) + return + + case token.INTERPOLATION: + s.errf(s.offset, "interpolation not allowed in attribute") + s.popInterpolation() + s.recoverParen(1) + case token.LPAREN: + s.scanAttributeTokens(token.RPAREN) + case token.LBRACE: + s.scanAttributeTokens(token.RBRACE) + case token.LBRACK: + s.scanAttributeTokens(token.RBRACK) + case token.RPAREN, token.RBRACK, token.RBRACE: + s.errf(s.offset, "unexpected '%s'", tok) + } + } +} + +// recoverParen is an approximate recovery mechanism to recover from invalid +// attributes. +func (s *Scanner) recoverParen(open int) { + for { + switch s.ch { + case '\n', -1: + return + case '(': + open++ + case ')': + if open--; open == 0 { + return + } + } + s.next() + } +} + +func (s *Scanner) skipWhitespace(inc int) { + for { + switch s.ch { + case ' ', '\t': + s.spacesSinceLast += inc + case '\n': + s.linesSinceLast += inc + if s.insertEOL { + return + } + case '\r': + default: + return + } + s.next() + } +} + +// Helper functions for scanning multi-byte tokens such as >> += >>= . +// Different routines recognize different length tok_i based on matches +// of ch_i. If a token ends in '=', the result is tok1 or tok3 +// respectively. Otherwise, the result is tok0 if there was no other +// matching character, or tok2 if the matching character was ch2. 
+ +func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token { + if s.ch == '=' { + s.next() + return tok1 + } + return tok0 +} + +func (s *Scanner) popInterpolation() quoteInfo { + quote := s.quoteStack[len(s.quoteStack)-1] + s.quoteStack = s.quoteStack[:len(s.quoteStack)-1] + return quote +} + +// ResumeInterpolation resumes scanning of a string interpolation. +func (s *Scanner) ResumeInterpolation() string { + quote := s.popInterpolation() + _, str := s.scanString(s.offset-1, quote) + return str +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// EOF. +// +// If the returned token is a literal (IDENT, INT, FLOAT, +// IMAG, CHAR, STRING) or COMMENT, the literal string +// has the corresponding value. +// +// If the returned token is a keyword, the literal string is the keyword. +// +// If the returned token is Comma, the corresponding +// literal string is "," if the comma was present in the source, +// and "\n" if the semicolon was inserted because of a newline or +// at EOF. +// +// If the returned token is ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. 
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace(1) + + var rel token.RelPos + switch { + case s.linesSinceLast > 1: + rel = token.NewSection + case s.linesSinceLast == 1: + rel = token.Newline + case s.spacesSinceLast > 0: + rel = token.Blank + default: + rel = token.NoSpace + } + // current token start + offset := s.offset + pos = s.file.Pos(offset, rel) + + // determine token value + insertEOL := false + var quote quoteInfo + switch ch := s.ch; { + case '0' <= ch && ch <= '9': + insertEOL = true + tok, lit = s.scanNumber(false) + case isLetter(ch), ch == '$', ch == '#': + lit = s.scanFieldIdentifier() + if len(lit) > 1 { + // keywords are longer than one letter - avoid lookup otherwise + tok = token.Lookup(lit) + insertEOL = true + break + } + if ch != '#' || (s.ch != '\'' && s.ch != '"' && s.ch != '#') { + tok = token.IDENT + insertEOL = true + break + } + quote.numHash = 1 + ch = s.ch + fallthrough + default: + s.next() // always make progress + switch ch { + case -1: + if s.insertEOL { + s.insertEOL = false // EOF consumed + return s.file.Pos(offset, token.Elided), token.COMMA, "\n" + } + tok = token.EOF + case '_': + if s.ch == '|' { + // Unconditionally require this to be followed by another + // underscore to avoid needing an extra lookahead. + // Note that `_|x` is always equal to _. 
+ s.next() + if s.ch != '_' { + s.errf(s.file.Offset(pos), "illegal token '_|'; expected '_'") + insertEOL = s.insertEOL // preserve insertComma info + tok = token.ILLEGAL + lit = "_|" + break + } + s.next() + tok = token.BOTTOM + lit = "_|_" + } else { + tok = token.IDENT + lit = "_" + s.scanFieldIdentifier() + } + insertEOL = true + + case '\n': + // we only reach here if s.insertComma was + // set in the first place and exited early + // from s.skipWhitespace() + s.insertEOL = false // newline consumed + p := s.file.Pos(offset, token.Elided) + s.skipWhitespace(1) + // Don't elide comma before a ',' or ':' to ensure JSON + // conformance. Note that cue fmt should immediately undo those. + if s.ch == ',' || s.ch == ':' { + return s.Scan() + } + return p, token.COMMA, "\n" + + case '#': + for quote.numHash++; s.ch == '#'; quote.numHash++ { + s.next() + } + ch = s.ch + if ch != '\'' && ch != '"' { + break + } + s.next() + fallthrough + case '"', '\'': + insertEOL = true + quote.char = ch + quote.numChar = 1 + offs := s.offset - 1 - quote.numHash + switch _, n := s.consumeQuotes(ch, 2); n { + case 0: + quote.numChar = 1 + tok, lit = s.scanString(offs, quote) + case 1: + s.checkHashCount(offs, quote) + tok, lit = token.STRING, string(s.src[offs:s.offset]) + case 2: + quote.numChar = 3 + switch s.ch { + case '\n': + s.next() + tok, lit = s.scanString(offs, quote) + case '\r': + s.next() + if s.ch == '\n' { + s.next() + tok, lit = s.scanString(offs, quote) + break + } + fallthrough + default: + s.errf(offs, "expected newline after multiline quote %s", + s.src[offs:s.offset]) + tok, lit = token.STRING, string(s.src[offs:s.offset]) + } + } + case '@': + insertEOL = true + tok, lit = s.scanAttribute() + case ':': + if s.ch == ':' { + s.next() + tok = token.ISA + } else { + tok = token.COLON + } + case ';': + tok = token.SEMICOLON + insertEOL = true + case '?': + tok = token.OPTION + insertEOL = true + case '.': + if '0' <= s.ch && s.ch <= '9' { + insertEOL = true + tok, 
lit = s.scanNumber(true) + } else if s.ch == '.' { + s.next() + if s.ch == '.' { + s.next() + tok = token.ELLIPSIS + insertEOL = true + } else { + s.errf(s.file.Offset(pos), "illegal token '..'; expected '.'") + } + } else { + tok = token.PERIOD + } + case ',': + tok = token.COMMA + lit = "," + case '(': + tok = token.LPAREN + case ')': + insertEOL = true + tok = token.RPAREN + case '[': + tok = token.LBRACK + case ']': + insertEOL = true + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + insertEOL = true + tok = token.RBRACE + case '+': + tok = token.ADD // Consider ++ for list concatenate. + case '-': + tok = token.SUB + case '*': + tok = token.MUL + case '/': + if s.ch == '/' { + // comment + if s.insertEOL && s.findLineEnd() { + // reset position to the beginning of the comment + s.ch = '/' + s.offset = s.file.Offset(pos) + s.rdOffset = s.offset + 1 + s.insertEOL = false // newline consumed + return s.file.Pos(offset, token.Elided), token.COMMA, "\n" + } + comment := s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + s.insertEOL = false // newline consumed + goto scanAgain + } + tok = token.COMMENT + lit = comment + } else { + tok = token.QUO + } + // We no longer use %, but seems like a useful token to use for + // something else at some point. 
+ // case '%': + case '<': + if s.ch == '-' { + s.next() + tok = token.ARROW + } else { + tok = s.switch2(token.LSS, token.LEQ) + } + case '>': + tok = s.switch2(token.GTR, token.GEQ) + case '=': + if s.ch == '~' { + s.next() + tok = token.MAT + } else { + tok = s.switch2(token.BIND, token.EQL) + } + case '!': + if s.ch == '~' { + s.next() + tok = token.NMAT + } else { + tok = s.switch2(token.NOT, token.NEQ) + } + case '&': + switch s.ch { + case '&': + s.next() + tok = token.LAND + default: + tok = token.AND + } + case '|': + if s.ch == '|' { + s.next() + tok = token.LOR + } else { + tok = token.OR + } + default: + // next reports unexpected BOMs - don't repeat + if ch != bom { + s.errf(s.file.Offset(pos), "illegal character %#U", ch) + } + insertEOL = s.insertEOL // preserve insertSemi info + tok = token.ILLEGAL + lit = string(ch) + } + } + if s.mode&dontInsertCommas == 0 { + s.insertEOL = insertEOL + } + + s.linesSinceLast = 0 + s.spacesSinceLast = 0 + return +} diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go new file mode 100644 index 0000000000..937108382b --- /dev/null +++ b/vendor/cuelang.org/go/cue/token/position.go @@ -0,0 +1,472 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (byte count) + // RelPos Pos // relative position information +} + +// IsValid reports whether the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file, as well as +// relative positioning information. It can be converted into a Position for a +// more convenient, but much larger, representation. +// +type Pos struct { + file *File + offset int +} + +// File returns the file that contains the position p or nil if there is no +// such file (for instance for p == NoPos). 
+// +func (p Pos) File() *File { + if p.index() == 0 { + return nil + } + return p.file +} + +func (p Pos) Line() int { + if p.file == nil { + return 0 + } + return p.Position().Line +} + +func (p Pos) Column() int { + if p.file == nil { + return 0 + } + return p.Position().Column +} + +func (p Pos) Filename() string { + if p.file == nil { + return "" + } + return p.Position().Filename +} + +func (p Pos) Position() Position { + if p.file == nil { + return Position{} + } + return p.file.Position(p) +} + +func (p Pos) String() string { + return p.Position().String() +} + +// NoPos is the zero value for Pos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +var NoPos = Pos{} + +// RelPos indicates the relative position of token to the previous token. +type RelPos int + +const ( + // NoRelPos indicates no relative position is specified. + NoRelPos RelPos = iota + + // Elided indicates that the token for which this position is defined is + // not rendered at all. + Elided + + // NoSpace indicates there is no whitespace after this token. + NoSpace + + // Blank means there is horizontal space after this token. + Blank + + // Newline means there is a single newline after this token. + Newline + + // NewSection means there are two or more newlines after this token. + NewSection + + relMask = 0xf + relShift = 4 +) + +var relNames = []string{ + "invalid", "elided", "nospace", "blank", "newline", "section", +} + +func (p RelPos) String() string { return relNames[p] } + +func (p RelPos) Pos() Pos { + return Pos{nil, int(p)} +} + +// HasRelPos repors whether p has a relative position. +func (p Pos) HasRelPos() bool { + return p.offset&relMask != 0 + +} + +func (p Pos) Before(q Pos) bool { + return p.file == q.file && p.Offset() < q.Offset() +} + +// Offset reports the byte offset relative to the file. 
+func (p Pos) Offset() int { + return p.Position().Offset +} + +// Add creates a new position relative to the p offset by n. +func (p Pos) Add(n int) Pos { + return Pos{p.file, p.offset + toPos(index(n))} +} + +// IsValid reports whether the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// IsNewline reports whether the relative information suggests this node should +// be printed on a new lien. +func (p Pos) IsNewline() bool { + return p.RelPos() >= Newline +} + +func (p Pos) WithRel(rel RelPos) Pos { + return Pos{p.file, p.offset&^relMask | int(rel)} +} + +func (p Pos) RelPos() RelPos { + return RelPos(p.offset & relMask) +} + +func (p Pos) index() index { + return index(p.offset) >> relShift +} + +func toPos(x index) int { + return (int(x) << relShift) +} + +// ----------------------------------------------------------------------------- +// File + +type index int + +// A File has a name, size, and line offset table. +type File struct { + mutex sync.RWMutex + name string // file name as provided to AddFile + base index // Pos index range for this file is [base...base+size] + size index // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []index // lines contains the offset of the first character for each line (the first entry is always 0) + infos []lineInfo +} + +// NewFile returns a new file. +func NewFile(filename string, base, size int) *File { + if base < 0 { + base = 1 + } + return &File{sync.RWMutex{}, filename, index(base), index(size), []index{0}, nil} +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return int(f.base) +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return int(f.size) +} + +// LineCount returns the number of lines in file f. 
+func (f *File) LineCount() int { + f.mutex.RLock() + n := len(f.lines) + f.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + x := index(offset) + f.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < x) && x < f.size { + f.lines = append(f.lines, x) + } + f.mutex.Unlock() +} + +// MergeLine merges a line with the following line. It is akin to replacing +// the newline character at the end of the line with a space (to not change the +// remaining offsets). To obtain the line number, consult e.g. Position.Line. +// MergeLine will panic if given an invalid line number. +// +func (f *File) MergeLine(line int) { + if line <= 0 { + panic("illegal line number (line numbering starts at 1)") + } + f.mutex.Lock() + defer f.mutex.Unlock() + if line >= len(f.lines) { + panic("illegal line number") + } + // To merge the line numbered with the line numbered , + // we need to remove the entry in lines corresponding to the line + // numbered . The entry in lines corresponding to the line + // numbered is located at index , since indices in lines + // are 0-based and line numbers are 1-based. + copy(f.lines[line:], f.lines[line+1:]) + f.lines = f.lines[:len(f.lines)-1] +} + +// SetLines sets the line offsets for a file and reports whether it succeeded. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// Callers must not mutate the provided slice after SetLines returns. 
+// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= index(offset) { + return false + } + } + + // set lines table + f.mutex.Lock() + f.lines = f.lines[:0] + for _, l := range lines { + f.lines = append(f.lines, index(l)) + } + f.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +// It ignores position-altering //line comments. +func (f *File) SetLinesForContent(content []byte) { + var lines []index + line := index(0) + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = index(offset) + 1 + } + } + + // set lines table + f.mutex.Lock() + f.lines = lines + f.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + x := index(offset) + f.mutex.Lock() + if i := len(f.infos); i == 0 || index(f.infos[i-1].Offset) < x && x < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. 
+// +func (f *File) Pos(offset int, rel RelPos) Pos { + if index(offset) > f.size { + panic("illegal file offset") + } + return Pos{f, toPos(f.base+index(offset)) + int(rel)} +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + x := p.index() + if x < f.base || x > f.base+index(f.size) { + panic("illegal Pos value") + } + return int(x - f.base) +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// unpack returns the filename and line and column number for a file offset. +// If adjusted is set, unpack will return the filename and line information +// possibly adjusted by //line comments; otherwise those comments are ignored. +// +func (f *File) unpack(offset index, adjusted bool) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = int(i+1), int(offset-f.lines[i]+1) + } + if adjusted && len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, int(offset)); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, index(alt.Offset)); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos, adjusted bool) (pos Position) { + offset := p.index() - f.base + pos.Offset = int(offset) + pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted) + return +} + +// PositionFor returns the Position value for the given file position p. +// If adjusted is set, the position may be adjusted by position-altering +// //line comments; otherwise those comments are ignored. 
+// p must be a Pos value in f or NoPos. +// +func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) { + x := p.index() + if p != NoPos { + if x < f.base || x > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p, adjusted) + } + return +} + +// Position returns the Position value for the given file position p. +// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true). +// +func (f *File) Position(p Pos) (pos Position) { + return f.PositionFor(p, true) +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []index, x index) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/cuelang.org/go/cue/token/token.go b/vendor/cuelang.org/go/cue/token/token.go new file mode 100644 index 0000000000..5e15443449 --- /dev/null +++ b/vendor/cuelang.org/go/cue/token/token.go @@ -0,0 +1,266 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package token defines constants representing the lexical tokens of the Go +// programming language and basic operations on tokens (printing, predicates). +package token // import "cuelang.org/go/cue/token" + +import "strconv" + +// Token is the set of lexical tokens of the CUE configuration language. +type Token int + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + ATTRIBUTE // @foo(bar,baz=4) + + literalBeg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // main, _tmp + INT // 12_345Mi, 0700, 0xdeadbeef, 1.2M + FLOAT // 123.45, + // DURATION // 3m4s TODO + STRING // "abc" + INTERPOLATION // a part of a template string, e.g. `"age: \(` + BOTTOM // _|_ + + literalEnd + + operatorBeg + // Operators and delimiters + ADD // + + SUB // - + MUL // * + POW // ^ + QUO // / + + IQUO // quo + IREM // rem + IDIV // div + IMOD // mod + + AND // & + OR // | + + LAND // && + LOR // || + + BIND // = + EQL // == + LSS // < + GTR // > + NOT // ! + ARROW // <- + + NEQ // != + LEQ // <= + GEQ // >= + + MAT // =~ + NMAT // !~ + + LPAREN // ( + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + ELLIPSIS // ... + + RPAREN // ) + RBRACK // ] + RBRACE // } + SEMICOLON // ; + COLON // : + ISA // :: + OPTION // ? 
+ operatorEnd + + keywordBeg + + IF + FOR + IN + LET + + TRUE + FALSE + NULL + + keywordEnd +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + INT: "INT", + FLOAT: "FLOAT", + STRING: "STRING", + INTERPOLATION: "INTERPOLATION", + ATTRIBUTE: "ATTRIBUTE", + + ADD: "+", + SUB: "-", + MUL: "*", + POW: "^", + QUO: "/", + + IQUO: "quo", + IREM: "rem", + IDIV: "div", + IMOD: "mod", + + AND: "&", + OR: "|", + + LAND: "&&", + LOR: "||", + + BIND: "=", + EQL: "==", + LSS: "<", + GTR: ">", + NOT: "!", + ARROW: "<-", + + NEQ: "!=", + LEQ: "<=", + GEQ: ">=", + + MAT: "=~", + NMAT: "!~", + + LPAREN: "(", + LBRACK: "[", + LBRACE: "{", + COMMA: ",", + PERIOD: ".", + ELLIPSIS: "...", + + RPAREN: ")", + RBRACK: "]", + RBRACE: "}", + SEMICOLON: ";", + COLON: ":", + ISA: "::", + OPTION: "?", + + BOTTOM: "_|_", + + FALSE: "false", + TRUE: "true", + NULL: "null", + + FOR: "for", + IF: "if", + IN: "in", + LET: "let", +} + +// String returns the string corresponding to the token tok. +// For operators, delimiters, and keywords the string is the actual +// token character sequence (e.g., for the token ADD, the string is +// "+"). For all other tokens the string corresponds to the token +// constant name (e.g. for the token IDENT, the string is "IDENT"). +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// A set of constants for precedence-based expression parsing. +// Non-operators have lowest precedence, followed by operators +// starting with precedence 1 up to unary operators. The highest +// precedence serves as "catch-all" precedence for selector, +// indexing, and other operator and delimiter tokens. 
+const ( + LowestPrec = lowestPrec + UnaryPrec = unaryPrec + HighestPrec = highestPrec +) + +const ( + lowestPrec = 0 // non-operators + unaryPrec = 8 + highestPrec = 9 +) + +// Precedence returns the operator precedence of the binary +// operator op. If op is not a binary operator, the result +// is LowestPrecedence. +// +func (tok Token) Precedence() int { + switch tok { + case OR: + return 1 + case AND: + return 2 + case LOR: + return 3 + case LAND: + return 4 + case EQL, NEQ, LSS, LEQ, GTR, GEQ, MAT, NMAT: + return 5 + case ADD, SUB: + return 6 + case MUL, QUO, IDIV, IMOD, IQUO, IREM: + return 7 + } + return lowestPrec +} + +var keywords map[string]Token + +func init() { + keywords = make(map[string]Token) + for i := keywordBeg + 1; i < keywordEnd; i++ { + keywords[tokens[i]] = i + } +} + +// Lookup maps an identifier to its keyword token or IDENT (if not a keyword). +// +func Lookup(ident string) Token { + if tok, isKeyword := keywords[ident]; isKeyword { + return tok + } + return IDENT +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +func (tok Token) IsLiteral() bool { return literalBeg < tok && tok < literalEnd } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (tok Token) IsOperator() bool { return operatorBeg < tok && tok < operatorEnd } + +// IsKeyword returns true for tokens corresponding to keywords; +// it returns false otherwise. 
+func (tok Token) IsKeyword() bool { return keywordBeg < tok && tok < keywordEnd } diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go new file mode 100644 index 0000000000..11528bef19 --- /dev/null +++ b/vendor/cuelang.org/go/cue/types.go @@ -0,0 +1,2536 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "strings" + + "github.com/cockroachdb/apd/v2" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" + "cuelang.org/go/internal/core/adt" + "cuelang.org/go/internal/core/compile" + "cuelang.org/go/internal/core/convert" + "cuelang.org/go/internal/core/eval" + "cuelang.org/go/internal/core/export" + "cuelang.org/go/internal/core/runtime" + "cuelang.org/go/internal/core/subsume" + "cuelang.org/go/internal/core/validate" + "cuelang.org/go/internal/types" +) + +// Kind determines the underlying type of a Value. +type Kind = adt.Kind + +const ( + // BottomKind represents the bottom value. + BottomKind Kind = adt.BottomKind + + // NullKind indicates a null value. + NullKind Kind = adt.NullKind + + // BoolKind indicates a boolean value. + BoolKind Kind = adt.BoolKind + + // IntKind represents an integral number. 
+ IntKind Kind = adt.IntKind + + // FloatKind represents a decimal float point number that cannot be + // converted to an integer. The underlying number may still be integral, + // but resulting from an operation that enforces the float type. + FloatKind Kind = adt.FloatKind + + // StringKind indicates any kind of string. + StringKind Kind = adt.StringKind + + // BytesKind is a blob of data. + BytesKind Kind = adt.BytesKind + + // StructKind is a kev-value map. + StructKind Kind = adt.StructKind + + // ListKind indicates a list of values. + ListKind Kind = adt.ListKind + + // _numberKind is used as a implementation detail inside + // Kind.String to indicate NumberKind. + + // NumberKind represents any kind of number. + NumberKind Kind = IntKind | FloatKind + + // TopKind represents the top value. + TopKind Kind = adt.TopKind +) + +// An structValue represents a JSON object. +// +// TODO: remove +type structValue struct { + ctx *adt.OpContext + v Value + obj *adt.Vertex + features []adt.Feature +} + +type hiddenStructValue = structValue + +// Len reports the number of fields in this struct. +func (o *hiddenStructValue) Len() int { + if o.obj == nil { + return 0 + } + return len(o.features) +} + +// At reports the key and value of the ith field, i < o.Len(). +func (o *hiddenStructValue) At(i int) (key string, v Value) { + f := o.features[i] + return o.v.idx.LabelStr(f), newChildValue(o, i) +} + +func (o *hiddenStructValue) at(i int) (v *adt.Vertex, isOpt bool) { + f := o.features[i] + arc := o.obj.Lookup(f) + if arc == nil { + arc = &adt.Vertex{ + Parent: o.v.v, + Label: f, + } + o.obj.MatchAndInsert(o.ctx, arc) + arc.Finalize(o.ctx) + isOpt = true + } + return arc, isOpt +} + +// Lookup reports the field for the given key. The returned Value is invalid +// if it does not exist. 
+func (o *hiddenStructValue) Lookup(key string) Value { + f := o.v.idx.StrLabel(key) + i := 0 + len := o.Len() + for ; i < len; i++ { + if o.features[i] == f { + break + } + } + if i == len { + x := mkErr(o.v.idx, o.obj, 0, "field not found: %v", key) + x.NotExists = true + // TODO: more specifically we should test whether the values that + // are addressable from the root of the configuration can support the + // looked up value. This will avoid false positives such as when + // an open literal struct is passed to a builtin. + if o.obj.Accept(o.ctx, f) { + x.Code = adt.IncompleteError + } + return newErrValue(o.v, x) + } + return newChildValue(o, i) +} + +// MarshalJSON returns a valid JSON encoding or reports an error if any of the +// fields is invalid. +func (o *structValue) marshalJSON() (b []byte, err errors.Error) { + b = append(b, '{') + n := o.Len() + for i := 0; i < n; i++ { + k, v := o.At(i) + s, err := json.Marshal(k) + if err != nil { + return nil, unwrapJSONError(err) + } + b = append(b, s...) + b = append(b, ':') + bb, err := json.Marshal(v) + if err != nil { + return nil, unwrapJSONError(err) + } + b = append(b, bb...) + if i < n-1 { + b = append(b, ',') + } + } + b = append(b, '}') + return b, nil +} + +var _ errors.Error = &marshalError{} + +type marshalError struct { + err errors.Error + b *adt.Bottom +} + +func toMarshalErr(v Value, b *adt.Bottom) error { + return &marshalError{v.toErr(b), b} +} + +func marshalErrf(v Value, src adt.Node, code adt.ErrorCode, msg string, args ...interface{}) error { + arguments := append([]interface{}{code, msg}, args...) + b := mkErr(v.idx, src, arguments...) 
+ return toMarshalErr(v, b) +} + +func (e *marshalError) Error() string { + return fmt.Sprintf("cue: marshal error: %v", e.err) +} + +func (e *marshalError) Bottom() *adt.Bottom { return e.b } +func (e *marshalError) Path() []string { return e.err.Path() } +func (e *marshalError) Msg() (string, []interface{}) { return e.err.Msg() } +func (e *marshalError) Position() token.Pos { return e.err.Position() } +func (e *marshalError) InputPositions() []token.Pos { + return e.err.InputPositions() +} + +func unwrapJSONError(err error) errors.Error { + switch x := err.(type) { + case *json.MarshalerError: + return unwrapJSONError(x.Err) + case *marshalError: + return x + case errors.Error: + return &marshalError{x, nil} + default: + return &marshalError{errors.Wrapf(err, token.NoPos, "json error"), nil} + } +} + +// An Iterator iterates over values. +// +type Iterator struct { + val Value + idx *runtime.Runtime + ctx *adt.OpContext + arcs []field + p int + cur Value + f adt.Feature + isOpt bool +} + +type hiddenIterator = Iterator + +type field struct { + arc *adt.Vertex + isOptional bool +} + +// Next advances the iterator to the next value and reports whether there was +// any. It must be called before the first call to Value or Key. +func (i *Iterator) Next() bool { + if i.p >= len(i.arcs) { + i.cur = Value{} + return false + } + f := i.arcs[i.p] + f.arc.Finalize(i.ctx) + p := linkParent(i.val.parent_, i.val.v, f.arc) + i.cur = makeValue(i.val.idx, f.arc, p) + i.f = f.arc.Label + i.isOpt = f.isOptional + i.p++ + return true +} + +// Value returns the current value in the list. It will panic if Next advanced +// past the last entry. +func (i *Iterator) Value() Value { + return i.cur +} + +// Selector reports the field label of this iteration. +func (i *Iterator) Selector() Selector { + return featureToSel(i.f, i.idx) +} + +// Label reports the label of the value if i iterates over struct fields and "" +// otherwise. 
+// +// +// Slated to be deprecated: use i.Selector().String(). Note that this will give +// more accurate string representations. +func (i *hiddenIterator) Label() string { + if i.f == 0 { + return "" + } + return i.idx.LabelStr(i.f) +} + +// IsHidden reports if a field is hidden from the data model. +// +// Deprecated: use i.Selector().PkgPath() != "" +func (i *hiddenIterator) IsHidden() bool { + return i.f.IsHidden() +} + +// IsOptional reports if a field is optional. +func (i *Iterator) IsOptional() bool { + return i.isOpt +} + +// IsDefinition reports if a field is a definition. +// +// Deprecated: use i.Selector().IsDefinition() +func (i *hiddenIterator) IsDefinition() bool { + return i.f.IsDef() +} + +// marshalJSON iterates over the list and generates JSON output. HasNext +// will return false after this operation. +func marshalList(l *Iterator) (b []byte, err errors.Error) { + b = append(b, '[') + if l.Next() { + for i := 0; ; i++ { + x, err := json.Marshal(l.Value()) + if err != nil { + return nil, unwrapJSONError(err) + } + b = append(b, x...) + if !l.Next() { + break + } + b = append(b, ',') + } + } + b = append(b, ']') + return b, nil +} + +func (v Value) getNum(k adt.Kind) (*adt.Num, errors.Error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, k); err != nil { + return nil, v.toErr(err) + } + n, _ := v.eval(ctx).(*adt.Num) + return n, nil +} + +// MantExp breaks x into its mantissa and exponent components and returns the +// exponent. If a non-nil mant argument is provided its value is set to the +// mantissa of x. The components satisfy x == mant × 10**exp. It returns an +// error if v is not a number. +// +// The components are not normalized. For instance, 2.00 is represented mant == +// 200 and exp == -2. Calling MantExp with a nil argument is an efficient way to +// get the exponent of the receiver. 
+func (v Value) MantExp(mant *big.Int) (exp int, err error) { + n, err := v.getNum(adt.NumKind) + if err != nil { + return 0, err + } + if n.X.Form != 0 { + return 0, ErrInfinite + } + if mant != nil { + mant.Set(&n.X.Coeff) + if n.X.Negative { + mant.Neg(mant) + } + } + return int(n.X.Exponent), nil +} + +// Decimal is for internal use only. The Decimal type that is returned is +// subject to change. +func (v hiddenValue) Decimal() (d *internal.Decimal, err error) { + n, err := v.getNum(adt.NumKind) + if err != nil { + return nil, err + } + return &n.X, nil +} + +// AppendInt appends the string representation of x in the given base to buf and +// returns the extended buffer, or an error if the underlying number was not +// an integer. +func (v Value) AppendInt(buf []byte, base int) ([]byte, error) { + i, err := v.Int(nil) + if err != nil { + return nil, err + } + return i.Append(buf, base), nil +} + +// AppendFloat appends to buf the string form of the floating-point number x. +// It returns an error if v is not a number. +func (v Value) AppendFloat(buf []byte, fmt byte, prec int) ([]byte, error) { + n, err := v.getNum(adt.NumKind) + if err != nil { + return nil, err + } + ctx := apd.BaseContext + nd := int(apd.NumDigits(&n.X.Coeff)) + int(n.X.Exponent) + if n.X.Form == apd.Infinite { + if n.X.Negative { + buf = append(buf, '-') + } + return append(buf, string('∞')...), nil + } + if fmt == 'f' && nd > 0 { + ctx.Precision = uint32(nd + prec) + } else { + ctx.Precision = uint32(prec) + } + var d apd.Decimal + ctx.Round(&d, &n.X) + return d.Append(buf, fmt), nil +} + +var ( + // ErrBelow indicates that a value was rounded down in a conversion. + ErrBelow = errors.New("value was rounded down") + + // ErrAbove indicates that a value was rounded up in a conversion. + ErrAbove = errors.New("value was rounded up") + + // ErrInfinite indicates that a value is infinite. 
+ ErrInfinite = errors.New("infinite") +) + +// Int converts the underlying integral number to an big.Int. It reports an +// error if the underlying value is not an integer type. If a non-nil *Int +// argument z is provided, Int stores the result in z instead of allocating a +// new Int. +func (v Value) Int(z *big.Int) (*big.Int, error) { + n, err := v.getNum(adt.IntKind) + if err != nil { + return nil, err + } + if z == nil { + z = &big.Int{} + } + if n.X.Exponent != 0 { + panic("cue: exponent should always be nil for integer types") + } + z.Set(&n.X.Coeff) + if n.X.Negative { + z.Neg(z) + } + return z, nil +} + +// Int64 converts the underlying integral number to int64. It reports an +// error if the underlying value is not an integer type or cannot be represented +// as an int64. The result is (math.MinInt64, ErrAbove) for x < math.MinInt64, +// and (math.MaxInt64, ErrBelow) for x > math.MaxInt64. +func (v Value) Int64() (int64, error) { + n, err := v.getNum(adt.IntKind) + if err != nil { + return 0, err + } + if !n.X.Coeff.IsInt64() { + if n.X.Negative { + return math.MinInt64, ErrAbove + } + return math.MaxInt64, ErrBelow + } + i := n.X.Coeff.Int64() + if n.X.Negative { + i = -i + } + return i, nil +} + +// Uint64 converts the underlying integral number to uint64. It reports an +// error if the underlying value is not an integer type or cannot be represented +// as a uint64. The result is (0, ErrAbove) for x < 0, and +// (math.MaxUint64, ErrBelow) for x > math.MaxUint64. +func (v Value) Uint64() (uint64, error) { + n, err := v.getNum(adt.IntKind) + if err != nil { + return 0, err + } + if n.X.Negative { + return 0, ErrAbove + } + if !n.X.Coeff.IsUint64() { + return math.MaxUint64, ErrBelow + } + i := n.X.Coeff.Uint64() + return i, nil +} + +// trimZeros trims 0's for better JSON respresentations. 
+func trimZeros(s string) string { + n1 := len(s) + s2 := strings.TrimRight(s, "0") + n2 := len(s2) + if p := strings.IndexByte(s2, '.'); p != -1 { + if p == n2-1 { + return s[:len(s2)+1] + } + return s2 + } + if n1-n2 <= 4 { + return s + } + return fmt.Sprint(s2, "e+", n1-n2) +} + +var ( + smallestPosFloat64 *apd.Decimal + smallestNegFloat64 *apd.Decimal + maxPosFloat64 *apd.Decimal + maxNegFloat64 *apd.Decimal +) + +func init() { + const ( + // math.SmallestNonzeroFloat64: 1 / 2**(1023 - 1 + 52) + smallest = "4.940656458412465441765687928682213723651e-324" + // math.MaxFloat64: 2**1023 * (2**53 - 1) / 2**52 + max = "1.797693134862315708145274237317043567981e+308" + ) + ctx := apd.BaseContext + ctx.Precision = 40 + + var err error + smallestPosFloat64, _, err = ctx.NewFromString(smallest) + if err != nil { + panic(err) + } + smallestNegFloat64, _, err = ctx.NewFromString("-" + smallest) + if err != nil { + panic(err) + } + maxPosFloat64, _, err = ctx.NewFromString(max) + if err != nil { + panic(err) + } + maxNegFloat64, _, err = ctx.NewFromString("-" + max) + if err != nil { + panic(err) + } +} + +// Float64 returns the float64 value nearest to x. It reports an error if v is +// not a number. If x is too small to be represented by a float64 (|x| < +// math.SmallestNonzeroFloat64), the result is (0, ErrBelow) or (-0, ErrAbove), +// respectively, depending on the sign of x. If x is too large to be represented +// by a float64 (|x| > math.MaxFloat64), the result is (+Inf, ErrAbove) or +// (-Inf, ErrBelow), depending on the sign of x. 
+func (v Value) Float64() (float64, error) { + n, err := v.getNum(adt.NumKind) + if err != nil { + return 0, err + } + if n.X.Negative { + if n.X.Cmp(smallestNegFloat64) == 1 { + return -0, ErrAbove + } + if n.X.Cmp(maxNegFloat64) == -1 { + return math.Inf(-1), ErrBelow + } + } else { + if n.X.Cmp(smallestPosFloat64) == -1 { + return 0, ErrBelow + } + if n.X.Cmp(maxPosFloat64) == 1 { + return math.Inf(1), ErrAbove + } + } + f, _ := n.X.Float64() + return f, nil +} + +// Value holds any value, which may be a Boolean, Error, List, Null, Number, +// Struct, or String. +type Value struct { + idx *runtime.Runtime + v *adt.Vertex + // Parent keeps track of the parent if the value corresponding to v.Parent + // differs, recursively. + parent_ *parent +} + +// parent is a distinct type from Value to ensure more type safety: Value +// is typically used by value, so taking a pointer to it has a high risk +// or globbering the contents. +type parent struct { + v *adt.Vertex + p *parent +} + +func (v Value) parent() Value { + switch { + case v.v == nil: + return Value{} + case v.parent_ != nil: + return Value{v.idx, v.parent_.v, v.parent_.p} + default: + return Value{v.idx, v.v.Parent, nil} + } +} + +type valueScope Value + +func (v valueScope) Vertex() *adt.Vertex { return v.v } +func (v valueScope) Parent() compile.Scope { + p := Value(v).parent() + if p.v == nil { + return nil + } + return valueScope(p) +} + +type hiddenValue = Value + +// Core is for internal use only. +func (v hiddenValue) Core(x *types.Value) { + x.V = v.v + x.R = v.idx +} + +func newErrValue(v Value, b *adt.Bottom) Value { + node := &adt.Vertex{BaseValue: b} + if v.v != nil { + node.Label = v.v.Label + node.Parent = v.v.Parent + } + node.UpdateStatus(adt.Finalized) + node.AddConjunct(adt.MakeRootConjunct(nil, b)) + return makeChildValue(v.parent(), node) +} + +func newVertexRoot(idx *runtime.Runtime, ctx *adt.OpContext, x *adt.Vertex) Value { + if ctx != nil { + // This is indicative of an zero Value. 
In some cases this is called + // with an error value. + x.Finalize(ctx) + } else { + x.UpdateStatus(adt.Finalized) + } + return makeValue(idx, x, nil) +} + +func newValueRoot(idx *runtime.Runtime, ctx *adt.OpContext, x adt.Expr) Value { + if n, ok := x.(*adt.Vertex); ok { + return newVertexRoot(idx, ctx, n) + } + node := &adt.Vertex{} + node.AddConjunct(adt.MakeRootConjunct(nil, x)) + return newVertexRoot(idx, ctx, node) +} + +func newChildValue(o *structValue, i int) Value { + arc, _ := o.at(i) + return makeValue(o.v.idx, arc, linkParent(o.v.parent_, o.v.v, arc)) +} + +// Dereference reports the value v refers to if v is a reference or v itself +// otherwise. +func Dereference(v Value) Value { + n := v.v + if n == nil || len(n.Conjuncts) != 1 { + return v + } + + c := n.Conjuncts[0] + r, _ := c.Expr().(adt.Resolver) + if r == nil { + return v + } + + ctx := v.ctx() + n, b := ctx.Resolve(c.Env, r) + if b != nil { + return newErrValue(v, b) + } + n.Finalize(ctx) + // NOTE: due to structure sharing, the path of the referred node may end + // up different from the one explicitly pointed to. The value will be the + // same, but the scope may differ. + // TODO(structureshare): see if we can construct the original path. This + // only has to be done if structures are being shared. + return makeValue(v.idx, n, nil) +} + +func makeValue(idx *runtime.Runtime, v *adt.Vertex, p *parent) Value { + if v.Status() == 0 || v.BaseValue == nil { + panic(fmt.Sprintf("not properly initialized (state: %v, value: %T)", + v.Status(), v.BaseValue)) + } + return Value{idx, v, p} +} + +// makeChildValue makes a new value, of which p is the parent, and links the +// parent pointer to p if necessary. +func makeChildValue(p Value, arc *adt.Vertex) Value { + return makeValue(p.idx, arc, linkParent(p.parent_, p.v, arc)) +} + +// linkParent creates the parent struct for an arc, if necessary. 
+// +// The parent struct is necessary if the parent struct also has a parent struct, +// or if arc is (structurally) shared and does not have node as a parent. +func linkParent(p *parent, node, arc *adt.Vertex) *parent { + if p == nil && node == arc.Parent { + return nil + } + return &parent{node, p} +} + +func remakeValue(base Value, env *adt.Environment, v adt.Expr) Value { + // TODO: right now this is necessary because disjunctions do not have + // populated conjuncts. + if v, ok := v.(*adt.Vertex); ok && v.Status() >= adt.Partial { + return Value{base.idx, v, nil} + } + n := &adt.Vertex{Label: base.v.Label} + n.AddConjunct(adt.MakeRootConjunct(env, v)) + n = manifest(base.ctx(), n) + n.Parent = base.v.Parent + return makeChildValue(base.parent(), n) +} + +func remakeFinal(base Value, env *adt.Environment, v adt.Value) Value { + n := &adt.Vertex{Parent: base.v.Parent, Label: base.v.Label, BaseValue: v} + n.UpdateStatus(adt.Finalized) + return makeChildValue(base.parent(), n) +} + +func (v Value) ctx() *adt.OpContext { + return newContext(v.idx) +} + +// Eval resolves the references of a value and returns the result. +// This method is not necessary to obtain concrete values. +func (v Value) Eval() Value { + if v.v == nil { + return v + } + x := v.v + // x = eval.FinalizeValue(v.idx.Runtime, v.v) + // x.Finalize(v.ctx()) + x = x.ToDataSingle() + return makeValue(v.idx, x, v.parent_) + // return remakeValue(v, nil, ctx.value(x)) +} + +// Default reports the default value and whether it existed. It returns the +// normal value if there is no default. 
+func (v Value) Default() (Value, bool) { + if v.v == nil { + return v, false + } + + d := v.v.Default() + if d == v.v { + return v, false + } + return makeValue(v.idx, d, v.parent_), true + + // d, ok := v.v.Value.(*adt.Disjunction) + // if !ok { + // return v, false + // } + + // var w *adt.Vertex + + // switch d.NumDefaults { + // case 0: + // return v, false + + // case 1: + // w = d.Values[0] + + // default: + // x := *v.v + // x.Value = &adt.Disjunction{ + // Src: d.Src, + // Values: d.Values[:d.NumDefaults], + // NumDefaults: 0, + // } + // w = &x + // } + + // w.Conjuncts = nil + // for _, c := range v.v.Conjuncts { + // // TODO: preserve field information. + // expr, _ := stripNonDefaults(c.Expr()) + // w.AddConjunct(adt.MakeConjunct(c.Env, expr)) + // } + + // return makeValue(v.idx, w), true + + // if !stripped { + // return v, false + // } + + // n := *v.v + // n.Conjuncts = conjuncts + // return Value{v.idx, &n}, true + + // isDefault := false + // for _, c := range v.v.Conjuncts { + // if hasDisjunction(c.Expr()) { + // isDefault = true + // break + // } + // } + + // if !isDefault { + // return v, false + // } + + // TODO: record expanded disjunctions in output. + // - Rename Disjunction to DisjunctionExpr + // - Introduce Disjuncts with Values. + // - In Expr introduce Star + // - Don't pick default by default? + + // Evaluate the value. + // x := eval.FinalizeValue(v.idx.Runtime, v.v) + // if b, _ := x.Value.(*adt.Bottom); b != nil { // && b.IsIncomplete() { + // return v, false + // } + // // Finalize and return here. + // return Value{v.idx, x}, isDefault +} + +// TODO: this should go: record preexpanded disjunctions in Vertex. 
+func hasDisjunction(expr adt.Expr) bool { + switch x := expr.(type) { + case *adt.DisjunctionExpr: + return true + case *adt.Conjunction: + for _, v := range x.Values { + if hasDisjunction(v) { + return true + } + } + case *adt.BinaryExpr: + switch x.Op { + case adt.OrOp: + return true + case adt.AndOp: + return hasDisjunction(x.X) || hasDisjunction(x.Y) + } + } + return false +} + +// TODO: this should go: record preexpanded disjunctions in Vertex. +func stripNonDefaults(expr adt.Expr) (r adt.Expr, stripped bool) { + switch x := expr.(type) { + case *adt.DisjunctionExpr: + if !x.HasDefaults { + return x, false + } + d := *x + d.Values = []adt.Disjunct{} + for _, v := range x.Values { + if v.Default { + d.Values = append(d.Values, v) + } + } + if len(d.Values) == 1 { + return d.Values[0].Val, true + } + return &d, true + + case *adt.BinaryExpr: + if x.Op != adt.AndOp { + return x, false + } + a, sa := stripNonDefaults(x.X) + b, sb := stripNonDefaults(x.Y) + if sa || sb { + bin := *x + bin.X = a + bin.Y = b + return &bin, true + } + return x, false + + default: + return x, false + } +} + +// Label reports he label used to obtain this value from the enclosing struct. +// +// TODO: get rid of this somehow. Probably by including a FieldInfo struct +// or the like. +func (v hiddenValue) Label() (string, bool) { + if v.v == nil || v.v.Label == 0 { + return "", false + } + return v.idx.LabelStr(v.v.Label), true +} + +// Kind returns the kind of value. It returns BottomKind for atomic values that +// are not concrete. For instance, it will return BottomKind for the bounds +// >=0. +func (v Value) Kind() Kind { + if v.v == nil { + return BottomKind + } + c := v.v.BaseValue + if !v.v.IsConcrete() { + return BottomKind + } + // TODO: perhaps we should not consider open lists as "incomplete". + if v.IncompleteKind() == adt.ListKind && !v.v.IsClosedList() { + return BottomKind + } + return c.Kind() +} + +// IncompleteKind returns a mask of all kinds that this value may be. 
+func (v Value) IncompleteKind() Kind { + if v.v == nil { + return BottomKind + } + return v.v.Kind() +} + +// MarshalJSON marshalls this value into valid JSON. +func (v Value) MarshalJSON() (b []byte, err error) { + b, err = v.marshalJSON() + if err != nil { + return nil, unwrapJSONError(err) + } + return b, nil +} + +func (v Value) marshalJSON() (b []byte, err error) { + v, _ = v.Default() + if v.v == nil { + return json.Marshal(nil) + } + ctx := newContext(v.idx) + x := v.eval(ctx) + + if _, ok := x.(adt.Resolver); ok { + return nil, marshalErrf(v, x, adt.IncompleteError, "value %q contains unresolved references", str(ctx, x)) + } + if !adt.IsConcrete(x) { + return nil, marshalErrf(v, x, adt.IncompleteError, "cannot convert incomplete value %q to JSON", str(ctx, x)) + } + + // TODO: implement marshalles in value. + switch k := x.Kind(); k { + case adt.NullKind: + return json.Marshal(nil) + case adt.BoolKind: + return json.Marshal(x.(*adt.Bool).B) + case adt.IntKind, adt.FloatKind, adt.NumKind: + b, err := x.(*adt.Num).X.MarshalText() + b = bytes.TrimLeft(b, "+") + return b, err + case adt.StringKind: + return json.Marshal(x.(*adt.String).Str) + case adt.BytesKind: + return json.Marshal(x.(*adt.Bytes).B) + case adt.ListKind: + i, _ := v.List() + return marshalList(&i) + case adt.StructKind: + obj, err := v.structValData(ctx) + if err != nil { + return nil, toMarshalErr(v, err) + } + return obj.marshalJSON() + case adt.BottomKind: + return nil, toMarshalErr(v, x.(*adt.Bottom)) + default: + return nil, marshalErrf(v, x, 0, "cannot convert value %q of type %T to JSON", str(ctx, x), x) + } +} + +// Syntax converts the possibly partially evaluated value into syntax. This +// can use used to print the value with package format. +func (v Value) Syntax(opts ...Option) ast.Node { + // TODO: the default should ideally be simplified representation that + // exactly represents the value. The latter can currently only be + // ensured with Raw(). 
+ if v.v == nil { + return nil + } + var o options = getOptions(opts) + // var inst *Instance + + p := export.Profile{ + Simplify: !o.raw, + TakeDefaults: o.final, + ShowOptional: !o.omitOptional && !o.concrete, + ShowDefinitions: !o.omitDefinitions && !o.concrete, + ShowHidden: !o.omitHidden && !o.concrete, + ShowAttributes: !o.omitAttrs, + ShowDocs: o.docs, + ShowErrors: o.showErrors, + } + + pkgID := v.instance().ID() + + bad := func(name string, err error) ast.Node { + const format = `"%s: internal error +Error: %s + +Profile: +%#v + +Value: +%v + +You could file a bug with the above information at: + https://cuelang.org/issues/new?assignees=&labels=NeedsInvestigation&template=bug_report.md&title=. +` + cg := &ast.CommentGroup{Doc: true} + msg := fmt.Sprintf(format, name, err, p, v) + for _, line := range strings.Split(msg, "\n") { + cg.List = append(cg.List, &ast.Comment{Text: "// " + line}) + } + x := &ast.BadExpr{} + ast.AddComment(x, cg) + return x + } + + // var expr ast.Expr + var err error + var f *ast.File + if o.concrete || o.final || o.resolveReferences { + // inst = v.instance() + var expr ast.Expr + expr, err = p.Value(v.idx, pkgID, v.v) + if err != nil { + return bad(`"cuelang.org/go/internal/core/export".Value`, err) + } + + // This introduces gratuitous unshadowing! 
+ f, err = astutil.ToFile(expr) + if err != nil { + return bad(`"cuelang.org/go/ast/astutil".ToFile`, err) + } + // return expr + } else { + f, err = p.Def(v.idx, pkgID, v.v) + if err != nil { + return bad(`"cuelang.org/go/internal/core/export".Def`, err) + } + } + +outer: + for _, d := range f.Decls { + switch d.(type) { + case *ast.Package, *ast.ImportDecl: + return f + case *ast.CommentGroup, *ast.Attribute: + default: + break outer + } + } + + if len(f.Decls) == 1 { + if e, ok := f.Decls[0].(*ast.EmbedDecl); ok { + return e.Expr + } + } + return &ast.StructLit{ + Elts: f.Decls, + } +} + +// Doc returns all documentation comments associated with the field from which +// the current value originates. +func (v Value) Doc() []*ast.CommentGroup { + if v.v == nil { + return nil + } + return export.ExtractDoc(v.v) +} + +// Split returns a list of values from which v originated such that +// the unification of all these values equals v and for all returned values. +// It will also split unchecked unifications (embeddings), so unifying the +// split values may fail if actually unified. +// Source returns a non-nil value. +// +// Deprecated: use Expr. +func (v hiddenValue) Split() []Value { + if v.v == nil { + return nil + } + a := []Value{} + for _, x := range v.v.Conjuncts { + a = append(a, remakeValue(v, x.Env, x.Expr())) + } + return a +} + +// Source returns the original node for this value. The return value may not +// be a syntax.Expr. For instance, a struct kind may be represented by a +// struct literal, a field comprehension, or a file. It returns nil for +// computed nodes. Use Split to get all source values that apply to a field. +func (v Value) Source() ast.Node { + if v.v == nil { + return nil + } + if len(v.v.Conjuncts) == 1 { + return v.v.Conjuncts[0].Source() + } + return v.v.Value().Source() +} + +// Err returns the error represented by v or nil v is not an error. 
+func (v Value) Err() error { + if err := v.checkKind(v.ctx(), adt.BottomKind); err != nil { + return v.toErr(err) + } + return nil +} + +// Pos returns position information. +// +// Use v.Expr to get positions for all conjuncts and disjuncts. +func (v Value) Pos() token.Pos { + if v.v == nil { + return token.NoPos + } + + if src := v.Source(); src != nil { + if pos := src.Pos(); pos != token.NoPos { + return pos + } + } + // Pick the most-concrete field. + var p token.Pos + for _, c := range v.v.Conjuncts { + x := c.Elem() + pp := pos(x) + if pp == token.NoPos { + continue + } + p = pp + // Prefer struct conjuncts with actual fields. + if s, ok := x.(*adt.StructLit); ok && len(s.Fields) > 0 { + break + } + } + return p +} + +// TODO: IsFinal: this value can never be changed. + +// IsClosed reports whether a list of struct is closed. It reports false when +// when the value is not a list or struct. +// +// Deprecated: use Allows(AnyString) and Allows(AnyIndex) or Kind/IncompleteKind. +func (v hiddenValue) IsClosed() bool { + if v.v == nil { + return false + } + switch v.Kind() { + case ListKind: + return v.v.IsClosedList() + case StructKind: + return !v.Allows(AnyString) + } + return false +} + +// Allows reports whether a field with the given selector could be added to v. +// +// Allows does not take into account validators like list.MaxItems(4). This may +// change in the future. +func (v Value) Allows(sel Selector) bool { + c := v.ctx() + f := sel.sel.feature(c) + return v.v.Accept(c, f) +} + +// IsConcrete reports whether the current value is a concrete scalar value +// (not relying on default values), a terminal error, a list, or a struct. +// It does not verify that values of lists or structs are concrete themselves. +// To check whether there is a concrete default, use v.Default().IsConcrete(). +func (v Value) IsConcrete() bool { + if v.v == nil { + return false // any is neither concrete, not a list or struct. 
+ } + if b, ok := v.v.BaseValue.(*adt.Bottom); ok { + return !b.IsIncomplete() + } + if !adt.IsConcrete(v.v) { + return false + } + if v.IncompleteKind() == adt.ListKind && !v.v.IsClosedList() { + return false + } + return true +} + +// // Deprecated: IsIncomplete +// // +// // It indicates that the value cannot be fully evaluated due to +// // insufficient information. +// func (v Value) IsIncomplete() bool { +// panic("deprecated") +// } + +// Exists reports whether this value existed in the configuration. +func (v Value) Exists() bool { + if v.v == nil { + return false + } + if err, ok := v.v.BaseValue.(*adt.Bottom); ok { + return !err.NotExists + } + return true +} + +func (v Value) checkKind(ctx *adt.OpContext, want adt.Kind) *adt.Bottom { + if v.v == nil { + return errNotExists + } + // TODO: use checkKind + x := v.eval(ctx) + if b, ok := x.(*adt.Bottom); ok { + return b + } + k := x.Kind() + if want != adt.BottomKind { + if k&want == adt.BottomKind { + return mkErr(v.idx, x, "cannot use value %v (type %s) as %s", + ctx.Str(x), k, want) + } + if !adt.IsConcrete(x) { + return mkErr(v.idx, x, adt.IncompleteError, "non-concrete value %v", k) + } + } + return nil +} + +func makeInt(v Value, x int64) Value { + n := &adt.Num{K: adt.IntKind} + n.X.SetInt64(int64(x)) + return remakeFinal(v, nil, n) +} + +// Len returns the number of items of the underlying value. +// For lists it reports the capacity of the list. For structs it indicates the +// number of fields, for bytes the number of bytes. +func (v Value) Len() Value { + if v.v != nil { + switch x := v.eval(v.ctx()).(type) { + case *adt.Vertex: + if x.IsList() { + n := &adt.Num{K: adt.IntKind} + n.X.SetInt64(int64(len(x.Elems()))) + if x.IsClosedList() { + return remakeFinal(v, nil, n) + } + // Note: this HAS to be a Conjunction value and cannot be + // an adt.BinaryExpr, as the expressions would be considered + // to be self-contained and unresolvable when evaluated + // (can never become concrete). 
+ c := &adt.Conjunction{Values: []adt.Value{ + &adt.BasicType{K: adt.IntKind}, + &adt.BoundValue{Op: adt.GreaterEqualOp, Value: n}, + }} + return remakeFinal(v, nil, c) + + } + case *adt.Bytes: + return makeInt(v, int64(len(x.B))) + case *adt.String: + return makeInt(v, int64(len([]rune(x.Str)))) + } + } + const msg = "len not supported for type %v" + return remakeValue(v, nil, mkErr(v.idx, v.v, msg, v.Kind())) + +} + +// Elem returns the value of undefined element types of lists and structs. +// +// Deprecated: use LookupPath in combination with "AnyString" or "AnyIndex". +func (v hiddenValue) Elem() (Value, bool) { + sel := AnyString + if v.v.IsList() { + sel = AnyIndex + } + x := v.LookupPath(MakePath(sel)) + return x, x.Exists() +} + +// List creates an iterator over the values of a list or reports an error if +// v is not a list. +func (v Value) List() (Iterator, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, adt.ListKind); err != nil { + return Iterator{idx: v.idx, ctx: ctx}, v.toErr(err) + } + arcs := []field{} + for _, a := range v.v.Elems() { + if a.Label.IsInt() { + arcs = append(arcs, field{arc: a}) + } + } + return Iterator{idx: v.idx, ctx: ctx, val: v, arcs: arcs}, nil +} + +// Null reports an error if v is not null. +func (v Value) Null() error { + v, _ = v.Default() + if err := v.checkKind(v.ctx(), adt.NullKind); err != nil { + return v.toErr(err) + } + return nil +} + +// // IsNull reports whether v is null. +// func (v Value) IsNull() bool { +// return v.Null() == nil +// } + +// Bool returns the bool value of v or false and an error if v is not a boolean. +func (v Value) Bool() (bool, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, adt.BoolKind); err != nil { + return false, v.toErr(err) + } + return v.eval(ctx).(*adt.Bool).B, nil +} + +// String returns the string value if v is a string or an error otherwise. 
+func (v Value) String() (string, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, adt.StringKind); err != nil { + return "", v.toErr(err) + } + return v.eval(ctx).(*adt.String).Str, nil +} + +// Bytes returns a byte slice if v represents a list of bytes or an error +// otherwise. +func (v Value) Bytes() ([]byte, error) { + v, _ = v.Default() + ctx := v.ctx() + switch x := v.eval(ctx).(type) { + case *adt.Bytes: + return append([]byte(nil), x.B...), nil + case *adt.String: + return []byte(x.Str), nil + } + return nil, v.toErr(v.checkKind(ctx, adt.BytesKind|adt.StringKind)) +} + +// Reader returns a new Reader if v is a string or bytes type and an error +// otherwise. +func (v hiddenValue) Reader() (io.Reader, error) { + v, _ = v.Default() + ctx := v.ctx() + switch x := v.eval(ctx).(type) { + case *adt.Bytes: + return bytes.NewReader(x.B), nil + case *adt.String: + return strings.NewReader(x.Str), nil + } + return nil, v.toErr(v.checkKind(ctx, adt.StringKind|adt.BytesKind)) +} + +// TODO: distinguish between optional, hidden, etc. Probably the best approach +// is to mark options in context and have a single function for creating +// a structVal. + +// structVal returns an structVal or an error if v is not a struct. +func (v Value) structValData(ctx *adt.OpContext) (structValue, *adt.Bottom) { + return v.structValOpts(ctx, options{ + omitHidden: true, + omitDefinitions: true, + omitOptional: true, + }) +} + +func (v Value) structValFull(ctx *adt.OpContext) (structValue, *adt.Bottom) { + return v.structValOpts(ctx, options{allowScalar: true}) +} + +// structVal returns an structVal or an error if v is not a struct. 
+func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err *adt.Bottom) { + v, _ = v.Default() + + obj := v.v + + switch b, ok := v.v.BaseValue.(*adt.Bottom); { + case ok && b.IsIncomplete() && !o.concrete && !o.final: + + // TODO: + // case o.allowScalar, !o.omitHidden, !o.omitDefinitions: + // Allow scalar values if hidden or definition fields are requested? + case o.allowScalar: + default: + obj, err = v.getStruct() + if err != nil { + return structValue{}, err + } + } + + features := export.VertexFeatures(ctx, obj) + + k := 0 + for _, f := range features { + if f.IsDef() && (o.omitDefinitions || o.concrete) { + continue + } + if f.IsHidden() && o.omitHidden { + continue + } + if arc := obj.Lookup(f); arc == nil { + if o.omitOptional { + continue + } + // ensure it really exists. + v := adt.Vertex{ + Parent: obj, + Label: f, + } + obj.MatchAndInsert(ctx, &v) + if len(v.Conjuncts) == 0 { + continue + } + } + features[k] = f + k++ + } + features = features[:k] + return structValue{ctx, v, obj, features}, nil +} + +// Struct returns the underlying struct of a value or an error if the value +// is not a struct. +func (v hiddenValue) Struct() (*Struct, error) { + // TODO: deprecate + ctx := v.ctx() + obj, err := v.structValOpts(ctx, options{}) + if err != nil { + return nil, v.toErr(err) + } + return &Struct{obj}, nil +} + +func (v Value) getStruct() (*adt.Vertex, *adt.Bottom) { + ctx := v.ctx() + if err := v.checkKind(ctx, adt.StructKind); err != nil { + if !err.ChildError { + return nil, err + } + } + return v.v, nil +} + +// Struct represents a CUE struct value. +type Struct struct { + structValue +} + +type hiddenStruct = Struct + +// FieldInfo contains information about a struct field. 
+type FieldInfo struct { + Selector string + Name string // Deprecated: use Selector + Pos int + Value Value + + IsDefinition bool + IsOptional bool + IsHidden bool +} + +func (s *hiddenStruct) Len() int { + return s.structValue.Len() +} + +// field reports information about the ith field, i < o.Len(). +func (s *hiddenStruct) Field(i int) FieldInfo { + a, opt := s.at(i) + ctx := s.v.ctx() + + v := makeChildValue(s.v, a) + name := s.v.idx.LabelStr(a.Label) + str := a.Label.SelectorString(ctx) + return FieldInfo{str, name, i, v, a.Label.IsDef(), opt, a.Label.IsHidden()} +} + +// FieldByName looks up a field for the given name. If isIdent is true, it will +// look up a definition or hidden field (starting with `_` or `_#`). Otherwise +// it interprets name as an arbitrary string for a regular field. +func (s *hiddenStruct) FieldByName(name string, isIdent bool) (FieldInfo, error) { + f := s.v.idx.Label(name, isIdent) + for i, a := range s.features { + if a == f { + return s.Field(i), nil + } + } + return FieldInfo{}, errNotFound +} + +// Fields creates an iterator over the Struct's fields. +func (s *hiddenStruct) Fields(opts ...Option) *Iterator { + iter, _ := s.v.Fields(opts...) + return iter +} + +// Fields creates an iterator over v's fields if v is a struct or an error +// otherwise. +func (v Value) Fields(opts ...Option) (*Iterator, error) { + o := options{omitDefinitions: true, omitHidden: true, omitOptional: true} + o.updateOptions(opts) + ctx := v.ctx() + obj, err := v.structValOpts(ctx, o) + if err != nil { + return &Iterator{idx: v.idx, ctx: ctx}, v.toErr(err) + } + + arcs := []field{} + for i := range obj.features { + arc, isOpt := obj.at(i) + arcs = append(arcs, field{arc: arc, isOptional: isOpt}) + } + return &Iterator{idx: v.idx, ctx: ctx, val: v, arcs: arcs}, nil +} + +// Lookup reports the value at a path starting from v. The empty path returns v +// itself. +// +// The Exists() method can be used to verify if the returned value existed. 
+// Lookup cannot be used to look up hidden or optional fields or definitions. +// +// Deprecated: use LookupPath. At some point before v1.0.0, this method will +// be removed to be reused eventually for looking up a selector. +func (v hiddenValue) Lookup(path ...string) Value { + ctx := v.ctx() + for _, k := range path { + // TODO(eval) TODO(error): always search in full data and change error + // message if a field is found but is of the incorrect type. + obj, err := v.structValData(ctx) + if err != nil { + // TODO: return a Value at the same location and a new error? + return newErrValue(v, err) + } + v = obj.Lookup(k) + } + return v +} + +// Path returns the path to this value from the root of an Instance. +// +// This is currently only defined for values that have a fixed path within +// a configuration, and thus not those that are derived from Elem, Template, +// or programmatically generated values such as those returned by Unify. +func (v Value) Path() Path { + if v.v == nil { + return Path{} + } + return Path{path: appendPath(nil, v)} +} + +// Path computes the sequence of Features leading from the root to of the +// instance to this Vertex. +func appendPath(a []Selector, v Value) []Selector { + if p := v.parent(); p.v != nil { + a = appendPath(a, p) + } + + if v.v.Label == 0 { + // A Label may be 0 for programmatically inserted nodes. + return a + } + + f := v.v.Label + if index := f.Index(); index == adt.MaxIndex { + return append(a, Selector{anySelector(f)}) + } + + var sel selector + switch f.Typ() { + case adt.IntLabel: + sel = indexSelector(f) + case adt.DefinitionLabel: + sel = definitionSelector(f.SelectorString(v.idx)) + + case adt.HiddenDefinitionLabel, adt.HiddenLabel: + sel = scopedSelector{ + name: f.IdentString(v.idx), + pkg: f.PkgID(v.idx), + } + + case adt.StringLabel: + sel = stringSelector(f.StringValue(v.idx)) + } + return append(a, Selector{sel}) +} + +// LookupDef is equal to LookupPath(MakePath(Def(name))). 
+// +// Deprecated: use LookupPath. +func (v hiddenValue) LookupDef(name string) Value { + return v.LookupPath(MakePath(Def(name))) +} + +var errNotFound = errors.Newf(token.NoPos, "field not found") + +// FieldByName looks up a field for the given name. If isIdent is true, it will +// look up a definition or hidden field (starting with `_` or `_#`). Otherwise +// it interprets name as an arbitrary string for a regular field. +// +// Deprecated: use LookupPath. +func (v hiddenValue) FieldByName(name string, isIdent bool) (f FieldInfo, err error) { + s, err := v.Struct() + if err != nil { + return f, err + } + return s.FieldByName(name, isIdent) +} + +// LookupField reports information about a field of v. +// +// Deprecated: use LookupPath +func (v hiddenValue) LookupField(name string) (FieldInfo, error) { + s, err := v.Struct() + if err != nil { + // TODO: return a Value at the same location and a new error? + return FieldInfo{}, err + } + f, err := s.FieldByName(name, true) + if err != nil { + return f, err + } + if f.IsHidden { + return f, errNotFound + } + return f, err +} + +// TODO: expose this API? +// +// // EvalExpr evaluates an expression within the scope of v, which must be +// // a struct. +// // +// // Expressions may refer to builtin packages if they can be uniquely identified. +// func (v Value) EvalExpr(expr ast.Expr) Value { +// ctx := v.ctx() +// result := evalExpr(ctx, v.eval(ctx), expr) +// return newValueRoot(ctx, result) +// } + +// Fill creates a new value by unifying v with the value of x at the given path. +// +// Values may be any Go value that can be converted to CUE, an ast.Expr or +// a Value. In the latter case, it will panic if the Value is not from the same +// Runtime. +// +// Any reference in v referring to the value at the given path will resolve +// to x in the newly created value. The resulting value is not validated. +// +// Deprecated: use FillPath. 
+func (v hiddenValue) Fill(x interface{}, path ...string) Value { + if v.v == nil { + return v + } + selectors := make([]Selector, len(path)) + for i, p := range path { + selectors[i] = Str(p) + } + return v.FillPath(MakePath(selectors...), x) +} + +// FillPath creates a new value by unifying v with the value of x at the given +// path. +// +// If x is an cue/ast.Expr, it will be evaluated within the context of the +// given path: identifiers that are not resolved within the expression are +// resolved as if they were defined at the path position. +// +// If x is a Value, it will be used as is. It panics if x is not created +// from the same Runtime as v. +// +// Otherwise, the given Go value will be converted to CUE using the same rules +// as Context.Encode. +// +// Any reference in v referring to the value at the given path will resolve to x +// in the newly created value. The resulting value is not validated. +// +func (v Value) FillPath(p Path, x interface{}) Value { + if v.v == nil { + // TODO: panic here? + return v + } + ctx := v.ctx() + if err := p.Err(); err != nil { + return newErrValue(v, mkErr(v.idx, nil, 0, "invalid path: %v", err)) + } + var expr adt.Expr + switch x := x.(type) { + case Value: + if v.idx != x.idx { + panic("values are not from the same runtime") + } + expr = x.v + case ast.Expr: + n := getScopePrefix(v, p) + // TODO: inject import path of current package? 
+ expr = resolveExpr(ctx, n, x) + default: + expr = convert.GoValueToValue(ctx, x, true) + } + for i := len(p.path) - 1; i >= 0; i-- { + switch sel := p.path[i].sel; { + case sel == AnyString.sel: + expr = &adt.StructLit{Decls: []adt.Decl{ + &adt.BulkOptionalField{ + Filter: &adt.BasicType{K: adt.StringKind}, + Value: expr, + }, + }} + + case sel == anyIndex.sel: + expr = &adt.ListLit{Elems: []adt.Elem{ + &adt.Ellipsis{Value: expr}, + }} + + case sel == anyDefinition.sel: + expr = &adt.Bottom{Err: errors.Newf(token.NoPos, + "AnyDefinition not supported")} + + case sel.kind() == adt.IntLabel: + i := sel.feature(ctx.Runtime).Index() + list := &adt.ListLit{} + any := &adt.Top{} + // TODO(perf): make this a constant thing. This will be possible with the query extension. + for k := 0; k < i; k++ { + list.Elems = append(list.Elems, any) + } + list.Elems = append(list.Elems, expr, &adt.Ellipsis{}) + expr = list + + default: + var d adt.Decl + if sel.optional() { + d = &adt.OptionalField{ + Label: sel.feature(v.idx), + Value: expr, + } + } else { + d = &adt.Field{ + Label: sel.feature(v.idx), + Value: expr, + } + } + expr = &adt.StructLit{Decls: []adt.Decl{d}} + } + } + n := &adt.Vertex{} + n.AddConjunct(adt.MakeRootConjunct(nil, expr)) + n.Finalize(ctx) + w := makeValue(v.idx, n, v.parent_) + return v.Unify(w) +} + +// Template returns a function that represents the template definition for a +// struct in a configuration file. It returns nil if v is not a struct kind or +// if there is no template associated with the struct. +// +// The returned function returns the value that would be unified with field +// given its name. +// +// Deprecated: use LookupPath in combination with using optional selectors. 
+func (v hiddenValue) Template() func(label string) Value { + if v.v == nil { + return nil + } + + types := v.v.OptionalTypes() + if types&(adt.HasAdditional|adt.HasPattern) == 0 { + return nil + } + + return func(label string) Value { + return v.LookupPath(MakePath(Str(label).Optional())) + } +} + +// Subsume reports nil when w is an instance of v or an error otherwise. +// +// Without options, the entire value is considered for assumption, which means +// Subsume tests whether v is a backwards compatible (newer) API version of w. +// +// Use the Final option to check subsumption if a w is known to be final, and +// should assumed to be closed. +// +// Use the Raw option to do a low-level subsumption, taking defaults into +// account. +// +// Value v and w must be obtained from the same build. TODO: remove this +// requirement. +func (v Value) Subsume(w Value, opts ...Option) error { + o := getOptions(opts) + p := subsume.CUE + switch { + case o.final && o.ignoreClosedness: + p = subsume.FinalOpen + case o.final: + p = subsume.Final + case o.ignoreClosedness: + p = subsume.API + } + if !o.raw { + p.Defaults = true + } + ctx := v.ctx() + return p.Value(ctx, v.v, w.v) +} + +// Deprecated: use Subsume. +// +// Subsumes reports whether w is an instance of v. +// +// Without options, Subsumes checks whether v is a backwards compatbile schema +// of w. +// +// By default, Subsumes tests whether two values are compatible +// Value v and w must be obtained from the same build. +// TODO: remove this requirement. 
+func (v hiddenValue) Subsumes(w Value) bool { + ctx := v.ctx() + p := subsume.Profile{Defaults: true} + return p.Check(ctx, v.v, w.v) +} + +func allowed(ctx *adt.OpContext, parent, n *adt.Vertex) *adt.Bottom { + if !parent.IsClosedList() && !parent.IsClosedStruct() { + return nil + } + + for _, a := range n.Arcs { + if !parent.Accept(ctx, a.Label) { + defer ctx.PopArc(ctx.PushArc(parent)) + label := a.Label.SelectorString(ctx) + parent.Accept(ctx, a.Label) + return ctx.NewErrf("field not allowed: %s", label) + } + } + return nil +} + +func addConjuncts(dst, src *adt.Vertex) { + c := adt.MakeRootConjunct(nil, src) + if src.Closed { + var root adt.CloseInfo + c.CloseInfo = root.SpawnRef(src, src.Closed, nil) + } + dst.AddConjunct(c) +} + +// Unify reports the greatest lower bound of v and w. +// +// Value v and w must be obtained from the same build. +// TODO: remove this requirement. +func (v Value) Unify(w Value) Value { + if v.v == nil { + return w + } + if w.v == nil || w.v == v.v { + return v + } + + n := &adt.Vertex{} + addConjuncts(n, v.v) + addConjuncts(n, w.v) + + ctx := newContext(v.idx) + n.Finalize(ctx) + + n.Parent = v.v.Parent + n.Label = v.v.Label + n.Closed = v.v.Closed || w.v.Closed + + if err := n.Err(ctx, adt.Finalized); err != nil { + return makeValue(v.idx, n, v.parent_) + } + if err := allowed(ctx, v.v, n); err != nil { + return newErrValue(w, err) + } + if err := allowed(ctx, w.v, n); err != nil { + return newErrValue(v, err) + } + + return makeValue(v.idx, n, v.parent_) +} + +// UnifyAccept is as v.Unify(w), but will disregard any field that is allowed +// in the Value accept. 
+func (v Value) UnifyAccept(w Value, accept Value) Value { + if v.v == nil { + return w + } + if w.v == nil { + return v + } + if accept.v == nil { + panic("accept must exist") + } + + n := &adt.Vertex{} + n.AddConjunct(adt.MakeRootConjunct(nil, v.v)) + n.AddConjunct(adt.MakeRootConjunct(nil, w.v)) + + ctx := newContext(v.idx) + n.Finalize(ctx) + + n.Parent = v.v.Parent + n.Label = v.v.Label + + if err := n.Err(ctx, adt.Finalized); err != nil { + return makeValue(v.idx, n, v.parent_) + } + if err := allowed(ctx, accept.v, n); err != nil { + return newErrValue(accept, err) + } + + return makeValue(v.idx, n, v.parent_) +} + +// Equals reports whether two values are equal, ignoring optional fields. +// The result is undefined for incomplete values. +func (v Value) Equals(other Value) bool { + if v.v == nil || other.v == nil { + return false + } + return adt.Equal(v.ctx(), v.v, other.v, 0) +} + +func (v Value) instance() *Instance { + if v.v == nil { + return nil + } + return getImportFromNode(v.idx, v.v) +} + +// Reference returns the instance and path referred to by this value such that +// inst.Lookup(path) resolves to the same value, or no path if this value is not +// a reference. If a reference contains index selection (foo[bar]), it will +// only return a reference if the index resolves to a concrete value. +// +// Deprecated: use ReferencePath +func (v hiddenValue) Reference() (inst *Instance, path []string) { + root, p := v.ReferencePath() + if !root.Exists() { + return nil, nil + } + + inst = getImportFromNode(v.idx, root.v) + for _, sel := range p.Selectors() { + switch x := sel.sel.(type) { + case stringSelector: + path = append(path, string(x)) + default: + path = append(path, sel.String()) + } + } + + return inst, path +} + +// ReferencePath returns the value and path referred to by this value such that +// value.LookupPath(path) resolves to the same value, or no path if this value +// is not a reference. 
+func (v Value) ReferencePath() (root Value, p Path) { + // TODO: don't include references to hidden fields. + if v.v == nil || len(v.v.Conjuncts) != 1 { + return Value{}, Path{} + } + ctx := v.ctx() + c := v.v.Conjuncts[0] + + x, path := reference(v.idx, ctx, c.Env, c.Expr()) + if x == nil { + return Value{}, Path{} + } + // NOTE: due to structure sharing, the path of the referred node may end + // up different from the one explicitly pointed to. The value will be the + // same, but the scope may differ. + // TODO(structureshare): see if we can construct the original path. This + // only has to be done if structures are being shared. + return makeValue(v.idx, x, nil), Path{path: path} +} + +func reference(rt *runtime.Runtime, c *adt.OpContext, env *adt.Environment, r adt.Expr) (inst *adt.Vertex, path []Selector) { + ctx := c + defer ctx.PopState(ctx.PushState(env, r.Source())) + + switch x := r.(type) { + // TODO: do we need to handle Vertex as well, in case this is hard-wired? + // Probably not, as this results from dynamic content. + + case *adt.NodeLink: + // TODO: consider getting rid of NodeLink. 
+ inst, path = mkPath(rt, nil, x.Node) + + case *adt.FieldReference: + env := ctx.Env(x.UpCount) + inst, path = mkPath(rt, nil, env.Vertex) + path = appendSelector(path, featureToSel(x.Label, rt)) + + case *adt.LabelReference: + env := ctx.Env(x.UpCount) + return mkPath(rt, nil, env.Vertex) + + case *adt.DynamicReference: + env := ctx.Env(x.UpCount) + inst, path = mkPath(rt, nil, env.Vertex) + v, _ := ctx.Evaluate(env, x.Label) + path = appendSelector(path, valueToSel(v)) + + case *adt.ImportReference: + inst = rt.LoadImport(rt.LabelStr(x.ImportPath)) + + case *adt.SelectorExpr: + inst, path = reference(rt, c, env, x.X) + path = appendSelector(path, featureToSel(x.Sel, rt)) + + case *adt.IndexExpr: + inst, path = reference(rt, c, env, x.X) + v, _ := ctx.Evaluate(env, x.Index) + path = appendSelector(path, valueToSel(v)) + } + if inst == nil { + return nil, nil + } + return inst, path +} + +func mkPath(r *runtime.Runtime, a []Selector, v *adt.Vertex) (root *adt.Vertex, path []Selector) { + if v.Parent == nil { + return v, a + } + root, path = mkPath(r, a, v.Parent) + path = appendSelector(path, featureToSel(v.Label, r)) + return root, path +} + +type options struct { + concrete bool // enforce that values are concrete + raw bool // show original values + hasHidden bool + omitHidden bool + omitDefinitions bool + omitOptional bool + omitAttrs bool + resolveReferences bool + showErrors bool + final bool + ignoreClosedness bool // used for comparing APIs + docs bool + disallowCycles bool // implied by concrete + allowScalar bool +} + +// An Option defines modes of evaluation. +type Option option + +type option func(p *options) + +// Final indicates a value is final. It implicitly closes all structs and lists +// in a value and selects defaults. +func Final() Option { + return func(o *options) { + o.final = true + o.omitDefinitions = true + o.omitOptional = true + o.omitHidden = true + } +} + +// Schema specifies the input is a Schema. Used by Subsume. 
+func Schema() Option { + return func(o *options) { + o.ignoreClosedness = true + } +} + +// Concrete ensures that all values are concrete. +// +// For Validate this means it returns an error if this is not the case. +// In other cases a non-concrete value will be replaced with an error. +func Concrete(concrete bool) Option { + return func(p *options) { + if concrete { + p.concrete = true + p.final = true + if !p.hasHidden { + p.omitHidden = true + p.omitDefinitions = true + } + } + } +} + +// DisallowCycles forces validation in the precense of cycles, even if +// non-concrete values are allowed. This is implied by Concrete(true). +func DisallowCycles(disallow bool) Option { + return func(p *options) { p.disallowCycles = disallow } +} + +// ResolveReferences forces the evaluation of references when outputting. +// This implies the input cannot have cycles. +func ResolveReferences(resolve bool) Option { + return func(p *options) { + p.resolveReferences = resolve + + // ResolveReferences is implemented as a Value printer, rather than + // a definition printer, even though it should be more like the latter. + // To reflect this we convert incomplete errors to their original + // expression. + // + // TODO: ShowErrors mostly shows incomplete errors, even though this is + // just an approximation. There seems to be some inconsistencies as to + // when child errors are marked as such, making the conversion somewhat + // inconsistent. This option is conservative, though. + p.showErrors = true + } +} + +// Raw tells Syntax to generate the value as is without any simplifications. +func Raw() Option { + return func(p *options) { p.raw = true } +} + +// All indicates that all fields and values should be included in processing +// even if they can be elided or omitted. +func All() Option { + return func(p *options) { + p.omitAttrs = false + p.omitHidden = false + p.omitDefinitions = false + p.omitOptional = false + } +} + +// Docs indicates whether docs should be included. 
+func Docs(include bool) Option { + return func(p *options) { p.docs = true } +} + +// Definitions indicates whether definitions should be included. +// +// Definitions may still be included for certain functions if they are referred +// to by other other values. +func Definitions(include bool) Option { + return func(p *options) { + p.hasHidden = true + p.omitDefinitions = !include + } +} + +// Hidden indicates that definitions and hidden fields should be included. +func Hidden(include bool) Option { + return func(p *options) { + p.hasHidden = true + p.omitHidden = !include + p.omitDefinitions = !include + } +} + +// Optional indicates that optional fields should be included. +func Optional(include bool) Option { + return func(p *options) { p.omitOptional = !include } +} + +// Attributes indicates that attributes should be included. +func Attributes(include bool) Option { + return func(p *options) { p.omitAttrs = !include } +} + +func getOptions(opts []Option) (o options) { + o.updateOptions(opts) + return +} + +func (o *options) updateOptions(opts []Option) { + for _, fn := range opts { + fn(o) + } +} + +// Validate reports any errors, recursively. The returned error may represent +// more than one error, retrievable with errors.Errors, if more than one +// exists. +func (v Value) Validate(opts ...Option) error { + o := options{} + o.updateOptions(opts) + + cfg := &validate.Config{ + Concrete: o.concrete, + DisallowCycles: o.disallowCycles, + AllErrors: true, + } + + b := validate.Validate(v.ctx(), v.v, cfg) + if b != nil { + return b.Err + } + return nil +} + +// Walk descends into all values of v, calling f. If f returns false, Walk +// will not descent further. It only visits values that are part of the data +// model, so this excludes optional fields, hidden fields, and definitions. 
+func (v Value) Walk(before func(Value) bool, after func(Value)) { + ctx := v.ctx() + switch v.Kind() { + case StructKind: + if before != nil && !before(v) { + return + } + obj, _ := v.structValData(ctx) + for i := 0; i < obj.Len(); i++ { + _, v := obj.At(i) + v.Walk(before, after) + } + case ListKind: + if before != nil && !before(v) { + return + } + list, _ := v.List() + for list.Next() { + list.Value().Walk(before, after) + } + default: + if before != nil { + before(v) + } + } + if after != nil { + after(v) + } +} + +// Expr reports the operation of the underlying expression and the values it +// operates on. +// +// For unary expressions, it returns the single value of the expression. +// +// For binary expressions it returns first the left and right value, in that +// order. For associative operations however, (for instance '&' and '|'), it may +// return more than two values, where the operation is to be applied in +// sequence. +// +// For selector and index expressions it returns the subject and then the index. +// For selectors, the index is the string value of the identifier. +// +// For interpolations it returns a sequence of values to be concatenated, some +// of which will be literal strings and some unevaluated expressions. +// +// A builtin call expression returns the value of the builtin followed by the +// args of the call. +func (v Value) Expr() (Op, []Value) { + // TODO: return v if this is complete? Yes for now + if v.v == nil { + return NoOp, nil + } + + var expr adt.Expr + var env *adt.Environment + + if v.v.IsData() { + expr = v.v.Value() + + } else { + switch len(v.v.Conjuncts) { + case 0: + if v.v.BaseValue == nil { + return NoOp, []Value{makeValue(v.idx, v.v, v.parent_)} // TODO: v? + } + expr = v.v.Value() + + case 1: + // the default case, processed below. 
+ c := v.v.Conjuncts[0] + env = c.Env + expr = c.Expr() + if w, ok := expr.(*adt.Vertex); ok { + return Value{v.idx, w, v.parent_}.Expr() + } + + default: + a := []Value{} + ctx := v.ctx() + for _, c := range v.v.Conjuncts { + // Keep parent here. TODO: do we need remove the requirement + // from other conjuncts? + n := &adt.Vertex{ + Parent: v.v.Parent, + Label: v.v.Label, + } + n.AddConjunct(c) + n.Finalize(ctx) + a = append(a, makeValue(v.idx, n, v.parent_)) + } + return adt.AndOp, a + } + } + + // TODO: replace appends with []Value{}. For not leave. + a := []Value{} + op := NoOp + switch x := expr.(type) { + case *adt.BinaryExpr: + a = append(a, remakeValue(v, env, x.X)) + a = append(a, remakeValue(v, env, x.Y)) + op = x.Op + case *adt.UnaryExpr: + a = append(a, remakeValue(v, env, x.X)) + op = x.Op + case *adt.BoundExpr: + a = append(a, remakeValue(v, env, x.Expr)) + op = x.Op + case *adt.BoundValue: + a = append(a, remakeValue(v, env, x.Value)) + op = x.Op + case *adt.Conjunction: + // pre-expanded unification + for _, conjunct := range x.Values { + a = append(a, remakeValue(v, env, conjunct)) + } + op = AndOp + case *adt.Disjunction: + count := 0 + outer: + for i, disjunct := range x.Values { + if i < x.NumDefaults { + for _, n := range x.Values[x.NumDefaults:] { + if subsume.Simplify.Value(v.ctx(), n, disjunct) == nil { + continue outer + } + } + } + count++ + a = append(a, remakeValue(v, env, disjunct)) + } + if count > 1 { + op = OrOp + } + + case *adt.DisjunctionExpr: + // Filter defaults that are subsumed by another value. 
+ count := 0 + outerExpr: + for _, disjunct := range x.Values { + if disjunct.Default { + for _, n := range x.Values { + a := adt.Vertex{ + Label: v.v.Label, + } + b := a + a.AddConjunct(adt.MakeRootConjunct(env, n.Val)) + b.AddConjunct(adt.MakeRootConjunct(env, disjunct.Val)) + + ctx := eval.NewContext(v.idx, nil) + ctx.Unify(&a, adt.Finalized) + ctx.Unify(&b, adt.Finalized) + if allowed(ctx, v.v, &b) != nil { + // Everything subsumed bottom + continue outerExpr + } + if allowed(ctx, v.v, &a) != nil { + // An error doesn't subsume anything except another error. + continue + } + a.Parent = v.v.Parent + if !n.Default && subsume.Simplify.Value(ctx, &a, &b) == nil { + continue outerExpr + } + } + } + count++ + a = append(a, remakeValue(v, env, disjunct.Val)) + } + if count > 1 { + op = adt.OrOp + } + + case *adt.Interpolation: + for _, p := range x.Parts { + a = append(a, remakeValue(v, env, p)) + } + op = InterpolationOp + + case *adt.FieldReference: + // TODO: allow hard link + ctx := v.ctx() + f := ctx.PushState(env, x.Src) + env := ctx.Env(x.UpCount) + a = append(a, remakeValue(v, nil, &adt.NodeLink{Node: env.Vertex})) + a = append(a, remakeValue(v, nil, ctx.NewString(x.Label.SelectorString(ctx)))) + _ = ctx.PopState(f) + op = SelectorOp + + case *adt.SelectorExpr: + a = append(a, remakeValue(v, env, x.X)) + // A string selector is quoted. + a = append(a, remakeValue(v, env, &adt.String{ + Str: x.Sel.SelectorString(v.idx), + })) + op = SelectorOp + + case *adt.IndexExpr: + a = append(a, remakeValue(v, env, x.X)) + a = append(a, remakeValue(v, env, x.Index)) + op = IndexOp + case *adt.SliceExpr: + a = append(a, remakeValue(v, env, x.X)) + a = append(a, remakeValue(v, env, x.Lo)) + a = append(a, remakeValue(v, env, x.Hi)) + op = SliceOp + case *adt.CallExpr: + // Interpret "and" and "or" builtin semantically. 
+ if fn, ok := x.Fun.(*adt.Builtin); ok && len(x.Args) == 1 && + (fn.Name == "or" || fn.Name == "and") { + + iter, _ := remakeValue(v, env, x.Args[0]).List() + for iter.Next() { + a = append(a, iter.Value()) + } + + op = OrOp + if fn.Name == "and" { + op = AndOp + } + + if len(a) == 0 { + // Mimic semantics of builtin. + switch op { + case AndOp: + a = append(a, remakeValue(v, env, &adt.Top{})) + case OrOp: + a = append(a, remakeValue(v, env, &adt.Bottom{ + Code: adt.IncompleteError, + Err: errors.Newf(x.Src.Fun.Pos(), "empty list in call to or"), + })) + } + op = NoOp + } + break + } + a = append(a, remakeValue(v, env, x.Fun)) + for _, arg := range x.Args { + a = append(a, remakeValue(v, env, arg)) + } + op = CallOp + case *adt.BuiltinValidator: + a = append(a, remakeValue(v, env, x.Builtin)) + for _, arg := range x.Args { + a = append(a, remakeValue(v, env, arg)) + } + op = CallOp + + case *adt.StructLit: + hasEmbed := false + fields := []adt.Decl{} + for _, d := range x.Decls { + switch d.(type) { + default: + fields = append(fields, d) + case adt.Value: + fields = append(fields, d) + case adt.Expr: + hasEmbed = true + } + } + + if !hasEmbed { + a = append(a, v) + break + } + + ctx := v.ctx() + + n := v.v + + if len(fields) > 0 { + n = &adt.Vertex{ + Parent: v.v.Parent, + Label: v.v.Label, + } + + s := &adt.StructLit{} + if k := v.v.Kind(); k != adt.StructKind && k != BottomKind { + // TODO: we should also add such a declaration for embeddings + // of structs with definitions. However, this is currently + // also not supported at the CUE level. If we do, it may be + // best handled with a special mode of unification. + s.Decls = append(s.Decls, &adt.BasicType{K: k}) + } + s.Decls = append(s.Decls, fields...) + c := adt.MakeRootConjunct(env, s) + n.AddConjunct(c) + n.Finalize(ctx) + n.Parent = v.v.Parent + } + + // Simulate old embeddings. 
+ envEmbed := &adt.Environment{ + Up: env, + Vertex: n, + } + + for _, d := range x.Decls { + switch x := d.(type) { + case adt.Value: + case adt.Expr: + // embedding + n := &adt.Vertex{Label: v.v.Label} + c := adt.MakeRootConjunct(envEmbed, x) + n.AddConjunct(c) + n.Finalize(ctx) + n.Parent = v.v.Parent + a = append(a, makeValue(v.idx, n, v.parent_)) + } + } + + // Could be done earlier, but keep struct with fields at end. + if len(fields) > 0 { + a = append(a, makeValue(v.idx, n, v.parent_)) + } + + if len(a) == 1 { + return a[0].Expr() + } + op = adt.AndOp + + default: + a = append(a, v) + } + return op, a +} diff --git a/vendor/cuelang.org/go/encoding/json/json.go b/vendor/cuelang.org/go/encoding/json/json.go new file mode 100644 index 0000000000..0e2c7f7cf1 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/json/json.go @@ -0,0 +1,265 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package json converts JSON to and from CUE. +package json + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/value" +) + +// Valid reports whether data is a valid JSON encoding. 
+func Valid(b []byte) bool { + return json.Valid(b) +} + +// Validate validates JSON and confirms it matches the constraints +// specified by v. +func Validate(b []byte, v cue.Value) error { + if !json.Valid(b) { + return fmt.Errorf("json: invalid JSON") + } + r := value.ConvertToRuntime(v.Context()) + inst, err := r.Compile("json.Validate", b) + if err != nil { + return err + } + + v = v.Unify(inst.Value()) + if v.Err() != nil { + return v.Err() + } + return nil +} + +// Extract parses JSON-encoded data to a CUE expression, using path for +// position information. +func Extract(path string, data []byte) (ast.Expr, error) { + expr, err := extract(path, data) + if err != nil { + return nil, err + } + patchExpr(expr) + return expr, nil +} + +// Decode parses JSON-encoded data to a CUE value, using path for position +// information. +// +// Deprecated: use Extract and build using cue.Context.BuildExpr. +func Decode(r *cue.Runtime, path string, data []byte) (*cue.Instance, error) { + expr, err := extract(path, data) + if err != nil { + return nil, err + } + return r.CompileExpr(expr) +} + +func extract(path string, b []byte) (ast.Expr, error) { + expr, err := parser.ParseExpr(path, b) + if err != nil || !json.Valid(b) { + p := token.NoPos + if pos := errors.Positions(err); len(pos) > 0 { + p = pos[0] + } + var x interface{} + err := json.Unmarshal(b, &x) + return nil, errors.Wrapf(err, p, "invalid JSON for file %q", path) + } + return expr, nil +} + +// NewDecoder configures a JSON decoder. The path is used to associate position +// information with each node. The runtime may be nil if the decoder +// is only used to extract to CUE ast objects. +// +// The runtime may be nil if Decode isn't used. +func NewDecoder(r *cue.Runtime, path string, src io.Reader) *Decoder { + return &Decoder{ + r: r, + path: path, + dec: json.NewDecoder(src), + offset: 1, + } +} + +// A Decoder converts JSON values to CUE. 
+type Decoder struct { + r *cue.Runtime + path string + dec *json.Decoder + offset int +} + +// Extract converts the current JSON value to a CUE ast. It returns io.EOF +// if the input has been exhausted. +func (d *Decoder) Extract() (ast.Expr, error) { + expr, err := d.extract() + if err != nil { + return expr, err + } + patchExpr(expr) + return expr, nil +} + +func (d *Decoder) extract() (ast.Expr, error) { + var raw json.RawMessage + err := d.dec.Decode(&raw) + if err == io.EOF { + return nil, err + } + offset := d.offset + d.offset += len(raw) + if err != nil { + pos := token.NewFile(d.path, offset, len(raw)).Pos(0, 0) + return nil, errors.Wrapf(err, pos, "invalid JSON for file %q", d.path) + } + expr, err := parser.ParseExpr(d.path, []byte(raw), parser.FileOffset(offset)) + if err != nil { + return nil, err + } + return expr, nil +} + +// Decode converts the current JSON value to a CUE instance. It returns io.EOF +// if the input has been exhausted. +// +// Deprecated: use Extract and build with cue.Context.BuildExpr. +func (d *Decoder) Decode() (*cue.Instance, error) { + expr, err := d.Extract() + if err != nil { + return nil, err + } + return d.r.CompileExpr(expr) +} + +// patchExpr simplifies the AST parsed from JSON. +// TODO: some of the modifications are already done in format, but are +// a package deal of a more aggressive simplify. Other pieces of modification +// should probably be moved to format. 
+func patchExpr(n ast.Node) { + type info struct { + reflow bool + } + stack := []info{{true}} + + afterFn := func(n ast.Node) { + switch n.(type) { + case *ast.ListLit, *ast.StructLit: + stack = stack[:len(stack)-1] + } + } + + var beforeFn func(n ast.Node) bool + + beforeFn = func(n ast.Node) bool { + isLarge := n.End().Offset()-n.Pos().Offset() > 50 + descent := true + + switch x := n.(type) { + case *ast.ListLit: + reflow := true + if !isLarge { + for _, e := range x.Elts { + if hasSpaces(e) { + reflow = false + break + } + } + } + stack = append(stack, info{reflow}) + if reflow { + x.Lbrack = x.Lbrack.WithRel(token.NoRelPos) + x.Rbrack = x.Rbrack.WithRel(token.NoRelPos) + } + return true + + case *ast.StructLit: + reflow := true + if !isLarge { + for _, e := range x.Elts { + if f, ok := e.(*ast.Field); !ok || hasSpaces(f) || hasSpaces(f.Value) { + reflow = false + break + } + } + } + stack = append(stack, info{reflow}) + if reflow { + x.Lbrace = x.Lbrace.WithRel(token.NoRelPos) + x.Rbrace = x.Rbrace.WithRel(token.NoRelPos) + } + return true + + case *ast.Field: + // label is always a string for JSON. + switch { + case true: + s, ok := x.Label.(*ast.BasicLit) + if !ok || s.Kind != token.STRING { + break // should not happen: implies invalid JSON + } + + u, err := literal.Unquote(s.Value) + if err != nil { + break // should not happen: implies invalid JSON + } + + // TODO(legacy): remove checking for '_' prefix once hidden + // fields are removed. 
+ if !ast.IsValidIdent(u) || strings.HasPrefix(u, "_") { + break // keep string + } + + x.Label = ast.NewIdent(u) + astutil.CopyMeta(x.Label, s) + } + ast.Walk(x.Value, beforeFn, afterFn) + descent = false + + case *ast.BasicLit: + if x.Kind == token.STRING && len(x.Value) > 10 { + s, err := literal.Unquote(x.Value) + if err != nil { + break // should not happen: implies invalid JSON + } + + x.Value = literal.String.WithOptionalTabIndent(len(stack)).Quote(s) + } + } + + if stack[len(stack)-1].reflow { + ast.SetRelPos(n, token.NoRelPos) + } + return descent + } + + ast.Walk(n, beforeFn, afterFn) +} + +func hasSpaces(n ast.Node) bool { + return n.Pos().RelPos() > token.NoSpace +} diff --git a/vendor/cuelang.org/go/internal/astinternal/debugstr.go b/vendor/cuelang.org/go/internal/astinternal/debugstr.go new file mode 100644 index 0000000000..0121b458ea --- /dev/null +++ b/vendor/cuelang.org/go/internal/astinternal/debugstr.go @@ -0,0 +1,281 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package astinternal + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +func DebugStr(x interface{}) (out string) { + if n, ok := x.(ast.Node); ok { + comments := "" + for _, g := range n.Comments() { + comments += DebugStr(g) + } + if comments != "" { + defer func() { out = "<" + comments + out + ">" }() + } + } + switch v := x.(type) { + case *ast.File: + out := "" + out += DebugStr(v.Decls) + return out + + case *ast.Package: + out := "package " + out += DebugStr(v.Name) + return out + + case *ast.LetClause: + out := "let " + out += DebugStr(v.Ident) + out += "=" + out += DebugStr(v.Expr) + return out + + case *ast.Alias: + out := DebugStr(v.Ident) + out += "=" + out += DebugStr(v.Expr) + return out + + case *ast.BottomLit: + return "_|_" + + case *ast.BasicLit: + return v.Value + + case *ast.Interpolation: + for _, e := range v.Elts { + out += DebugStr(e) + } + return out + + case *ast.EmbedDecl: + out += DebugStr(v.Expr) + return out + + case *ast.ImportDecl: + out := "import " + if v.Lparen != token.NoPos { + out += "( " + out += DebugStr(v.Specs) + out += " )" + } else { + out += DebugStr(v.Specs) + } + return out + + case *ast.Comprehension: + out := DebugStr(v.Clauses) + out += DebugStr(v.Value) + return out + + case *ast.StructLit: + out := "{" + out += DebugStr(v.Elts) + out += "}" + return out + + case *ast.ListLit: + out := "[" + out += DebugStr(v.Elts) + out += "]" + return out + + case *ast.Ellipsis: + out := "..." + if v.Type != nil { + out += DebugStr(v.Type) + } + return out + + case *ast.ForClause: + out := "for " + if v.Key != nil { + out += DebugStr(v.Key) + out += ": " + } + out += DebugStr(v.Value) + out += " in " + out += DebugStr(v.Source) + return out + + case *ast.IfClause: + out := "if " + out += DebugStr(v.Condition) + return out + + case *ast.Field: + out := DebugStr(v.Label) + if v.Optional != token.NoPos { + out += "?" 
+ } + if v.Value != nil { + switch v.Token { + case token.ILLEGAL, token.COLON: + out += ": " + default: + out += fmt.Sprintf(" %s ", v.Token) + } + out += DebugStr(v.Value) + for _, a := range v.Attrs { + out += " " + out += DebugStr(a) + } + } + return out + + case *ast.Attribute: + return v.Text + + case *ast.Ident: + return v.Name + + case *ast.SelectorExpr: + return DebugStr(v.X) + "." + DebugStr(v.Sel) + + case *ast.CallExpr: + out := DebugStr(v.Fun) + out += "(" + out += DebugStr(v.Args) + out += ")" + return out + + case *ast.ParenExpr: + out := "(" + out += DebugStr(v.X) + out += ")" + return out + + case *ast.UnaryExpr: + return v.Op.String() + DebugStr(v.X) + + case *ast.BinaryExpr: + out := DebugStr(v.X) + op := v.Op.String() + if 'a' <= op[0] && op[0] <= 'z' { + op = fmt.Sprintf(" %s ", op) + } + out += op + out += DebugStr(v.Y) + return out + + case []*ast.CommentGroup: + var a []string + for _, c := range v { + a = append(a, DebugStr(c)) + } + return strings.Join(a, "\n") + + case *ast.CommentGroup: + str := "[" + if v.Doc { + str += "d" + } + if v.Line { + str += "l" + } + str += strconv.Itoa(int(v.Position)) + var a = []string{} + for _, c := range v.List { + a = append(a, c.Text) + } + return str + strings.Join(a, " ") + "] " + + case *ast.IndexExpr: + out := DebugStr(v.X) + out += "[" + out += DebugStr(v.Index) + out += "]" + return out + + case *ast.SliceExpr: + out := DebugStr(v.X) + out += "[" + out += DebugStr(v.Low) + out += ":" + out += DebugStr(v.High) + out += "]" + return out + + case *ast.ImportSpec: + out := "" + if v.Name != nil { + out += DebugStr(v.Name) + out += " " + } + out += DebugStr(v.Path) + return out + + case []ast.Decl: + if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += DebugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + case []ast.Clause: + if len(v) == 0 { + return "" + } + out := "" + for _, c := range v { + out += DebugStr(c) + out += " " + } + return out + + case []ast.Expr: 
+ if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += DebugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + case []*ast.ImportSpec: + if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += DebugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + default: + if v == nil { + return "" + } + return fmt.Sprintf("<%T>", x) + } +} + +const sep = ", " diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go new file mode 100644 index 0000000000..058948012f --- /dev/null +++ b/vendor/cuelang.org/go/internal/attrs.go @@ -0,0 +1,252 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "strconv" + "strings" + "unicode" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +// AttrKind indicates the location of an attribute within CUE source. +type AttrKind uint8 + +const ( + // FieldAttr indicates an attribute is a field attribute. + // foo: bar @attr() + FieldAttr AttrKind = 1 << iota + + // DeclAttr indicates an attribute was specified at a declaration position. + // foo: { + // @attr() + // } + DeclAttr + + // TODO: Possible future attr kinds + // ElemAttr + // FileAttr + // ValueAttr = FieldAttr|DeclAttr|ElemAttr +) + +// Attr holds positional information for a single Attr. +type Attr struct { + Name string // e.g. 
"json" or "protobuf" + Body string + Kind AttrKind + Fields []KeyValue + Err error +} + +// NewNonExisting creates a non-existing attribute. +func NewNonExisting(key string) Attr { + const msgNotExist = "attribute %q does not exist" + return Attr{Err: errors.Newf(token.NoPos, msgNotExist, key)} +} + +type KeyValue struct { + data string + equal int // index of equal sign or 0 if non-existing +} + +func (kv *KeyValue) Text() string { return kv.data } +func (kv *KeyValue) Key() string { + if kv.equal == 0 { + return kv.data + } + s := kv.data[:kv.equal] + s = strings.TrimSpace(s) + return s +} +func (kv *KeyValue) Value() string { + if kv.equal == 0 { + return "" + } + return strings.TrimSpace(kv.data[kv.equal+1:]) +} + +func (a *Attr) hasPos(p int) error { + if a.Err != nil { + return a.Err + } + if p >= len(a.Fields) { + return fmt.Errorf("field does not exist") + } + return nil +} + +// String reports the possibly empty string value at the given position or +// an error the attribute is invalid or if the position does not exist. +func (a *Attr) String(pos int) (string, error) { + if err := a.hasPos(pos); err != nil { + return "", err + } + return a.Fields[pos].Text(), nil +} + +// Int reports the integer at the given position or an error if the attribute is +// invalid, the position does not exist, or the value at the given position is +// not an integer. +func (a *Attr) Int(pos int) (int64, error) { + if err := a.hasPos(pos); err != nil { + return 0, err + } + // TODO: use CUE's literal parser once it exists, allowing any of CUE's + // number types. + return strconv.ParseInt(a.Fields[pos].Text(), 10, 64) +} + +// Flag reports whether an entry with the given name exists at position pos or +// onwards or an error if the attribute is invalid or if the first pos-1 entries +// are not defined. 
+func (a *Attr) Flag(pos int, key string) (bool, error) { + if err := a.hasPos(pos - 1); err != nil { + return false, err + } + for _, kv := range a.Fields[pos:] { + if kv.Text() == key { + return true, nil + } + } + return false, nil +} + +// Lookup searches for an entry of the form key=value from position pos onwards +// and reports the value if found. It reports an error if the attribute is +// invalid or if the first pos-1 entries are not defined. +func (a *Attr) Lookup(pos int, key string) (val string, found bool, err error) { + if err := a.hasPos(pos - 1); err != nil { + return "", false, err + } + for _, kv := range a.Fields[pos:] { + if kv.Key() == key { + return kv.Value(), true, nil + } + } + return "", false, nil +} + +func ParseAttrBody(pos token.Pos, s string) (a Attr) { + a.Body = s + i := 0 + for { + i += skipSpace(s[i:]) + // always scan at least one, possibly empty element. + n, err := scanAttributeElem(pos, s[i:], &a) + if err != nil { + return Attr{Err: err} + } + if i += n; i >= len(s) { + break + } + i += skipSpace(s[i:]) + if s[i] != ',' { + return Attr{Err: errors.Newf(pos, "invalid attribute: expected comma")} + } + i++ + } + return a +} + +func skipSpace(s string) int { + for n, r := range s { + if !unicode.IsSpace(r) { + return n + } + } + return 0 +} + +func scanAttributeElem(pos token.Pos, s string, a *Attr) (n int, err errors.Error) { + // try CUE string + kv := KeyValue{} + if n, kv.data, err = scanAttributeString(pos, s); n == 0 { + // try key-value pair + p := strings.IndexAny(s, ",=") // ) is assumed to be stripped. 
+ switch { + case p < 0: + kv.data = strings.TrimSpace(s) + n = len(s) + + default: // ',' + n = p + kv.data = strings.TrimSpace(s[:n]) + + case s[p] == '=': + kv.equal = p + offset := p + 1 + offset += skipSpace(s[offset:]) + var str string + if p, str, err = scanAttributeString(pos, s[offset:]); p > 0 { + n = offset + p + kv.data = s[:offset] + str + } else { + n = len(s) + if p = strings.IndexByte(s[offset:], ','); p >= 0 { + n = offset + p + } + kv.data = strings.TrimSpace(s[:n]) + } + } + } + if a != nil { + a.Fields = append(a.Fields, kv) + } + return n, err +} + +func scanAttributeString(pos token.Pos, s string) (n int, str string, err errors.Error) { + if s == "" || (s[0] != '#' && s[0] != '"' && s[0] != '\'') { + return 0, "", nil + } + + nHash := 0 + for { + if nHash < len(s) { + if s[nHash] == '#' { + nHash++ + continue + } + if s[nHash] == '\'' || s[nHash] == '"' { + break + } + } + return nHash, s[:nHash], errors.Newf(pos, "invalid attribute string") + } + + // Determine closing quote. + nQuote := 1 + if c := s[nHash]; nHash+6 < len(s) && s[nHash+1] == c && s[nHash+2] == c { + nQuote = 3 + } + close := s[nHash:nHash+nQuote] + s[:nHash] + + // Search for closing quote. + index := strings.Index(s[len(close):], close) + if index == -1 { + return len(s), "", errors.Newf(pos, "attribute string not terminated") + } + + index += 2 * len(close) + s, err2 := literal.Unquote(s[:index]) + if err2 != nil { + return index, "", errors.Newf(pos, "invalid attribute string: %v", err2) + } + return index, s, nil +} diff --git a/vendor/cuelang.org/go/internal/cli/cli.go b/vendor/cuelang.org/go/internal/cli/cli.go new file mode 100644 index 0000000000..f6ffd251fe --- /dev/null +++ b/vendor/cuelang.org/go/internal/cli/cli.go @@ -0,0 +1,91 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cli + +import ( + "strings" + + "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" +) + +func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs errors.Error) { + var expr ast.Expr + + if k&cue.NumberKind != 0 { + var err error + expr, err = parser.ParseExpr(name, str) + if err != nil { + errs = errors.Wrapf(err, pos, + "invalid number for environment variable %s", name) + } + } + + if k&cue.BoolKind != 0 { + str = strings.TrimSpace(str) + b, ok := boolValues[str] + if !ok { + errs = errors.Append(errs, errors.Newf(pos, + "invalid boolean value %q for environment variable %s", str, name)) + } else if expr != nil || k&cue.StringKind != 0 { + // Convert into an expression + bl := ast.NewBool(b) + if expr != nil { + expr = &ast.BinaryExpr{Op: token.OR, X: expr, Y: bl} + } else { + expr = bl + } + } else { + x = ast.NewBool(b) + } + } + + if k&cue.StringKind != 0 { + if expr != nil { + expr = &ast.BinaryExpr{Op: token.OR, X: expr, Y: ast.NewString(str)} + } else { + x = ast.NewString(str) + } + } + + switch { + case expr != nil: + return expr, nil + case x != nil: + return x, nil + case errs == nil: + return nil, errors.Newf(pos, + "invalid type for environment variable %s", name) + } + return nil, errs +} + +var boolValues = map[string]bool{ + "1": true, + "0": false, + "t": true, + "f": false, + "T": true, + "F": false, + "true": true, + "false": false, + "TRUE": true, + "FALSE": false, + "True": true, + "False": false, +} diff 
--git a/vendor/cuelang.org/go/internal/core/adt/adt.go b/vendor/cuelang.org/go/internal/core/adt/adt.go new file mode 100644 index 0000000000..1a286e7596 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/adt.go @@ -0,0 +1,380 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +func Resolve(ctx *OpContext, c Conjunct) *Vertex { + env := c.Env + // TODO: also allow resolution in parent scopes. The following will set up + // the environments. But the compiler also needs to resolve accordingly. + // + // // Set up environments for parent scopes, if any. + // root := env + // for p := scope; p != nil; p = p.Parent { + // root.Up = &Environment{Vertex: p.Parent} + // root = root.Up + // } + + var v Value + + expr := c.Elem() + switch x := expr.(type) { + case Value: + v = x + + case Resolver: + r, err := ctx.Resolve(env, x) + if err != nil { + v = err + break + } + // r.Finalize(ctx) // TODO: Finalize here? + return r + + case Evaluator: + // TODO: have a way to evaluate, but not strip down to the value. + v, _ = ctx.Evaluate(env, expr.(Expr)) + + default: + // Unknown type. + v = ctx.NewErrf( + "could not evaluate expression %s of type %T", c.Elem(), c) + } + + return ToVertex(v) +} + +// A Node is any abstract data type representing an value or expression. +type Node interface { + Source() ast.Node + node() // enforce internal. 
+} + +// A Decl represents all valid StructLit elements. +type Decl interface { + Node + declNode() +} + +// An Elem represents all value ListLit elements. +// +// All Elem values can be used as a Decl. +type Elem interface { + Decl + elemNode() +} + +// An Expr corresponds to an ast.Expr. +// +// All Expr values can be used as an Elem or Decl. +type Expr interface { + Elem + expr() +} + +// A BaseValue is any Value or a *Marker. It indicates the type of a Vertex. +type BaseValue interface { + Kind() Kind +} + +// A Value represents a node in the evaluated data graph. +// +// All Values values can also be used as a Expr. +type Value interface { + Expr + Concreteness() Concreteness + Kind() Kind +} + +// An Evaluator provides a method to convert to a value. +type Evaluator interface { + Node + + // evaluate evaluates the underlying expression. If the expression + // is incomplete, it may record the error in ctx and return nil. + evaluate(ctx *OpContext) Value +} + +// A Resolver represents a reference somewhere else within a tree that resolves +// a value. +type Resolver interface { + Node + resolve(ctx *OpContext, state VertexStatus) *Vertex +} + +type YieldFunc func(env *Environment) + +// A Yielder represents 0 or more labeled values of structs or lists. +type Yielder interface { + Node + yield(ctx *OpContext, fn YieldFunc) +} + +// A Validator validates a Value. All Validators are Values. +type Validator interface { + Value + validate(c *OpContext, v Value) *Bottom +} + +// Pos returns the file position of n, or token.NoPos if it is unknown. +func Pos(n Node) token.Pos { + src := n.Source() + if src == nil { + return token.NoPos + } + return src.Pos() +} + +// Value + +func (x *Vertex) Concreteness() Concreteness { + // Depends on concreteness of value. + switch v := x.BaseValue.(type) { + case nil: + return Concrete // Should be indetermined. 
+ + case Value: + return v.Concreteness() + + default: // *StructMarker, *ListMarker: + return Concrete + } +} + +func (x *NodeLink) Concreteness() Concreteness { return Concrete } + +func (*Conjunction) Concreteness() Concreteness { return Constraint } +func (*Disjunction) Concreteness() Concreteness { return Constraint } +func (*BoundValue) Concreteness() Concreteness { return Constraint } + +func (*Builtin) Concreteness() Concreteness { return Concrete } +func (*BuiltinValidator) Concreteness() Concreteness { return Constraint } + +// Value and Expr + +func (*Bottom) Concreteness() Concreteness { return BottomLevel } +func (*Null) Concreteness() Concreteness { return Concrete } +func (*Bool) Concreteness() Concreteness { return Concrete } +func (*Num) Concreteness() Concreteness { return Concrete } +func (*String) Concreteness() Concreteness { return Concrete } +func (*Bytes) Concreteness() Concreteness { return Concrete } +func (*Top) Concreteness() Concreteness { return Any } +func (*BasicType) Concreteness() Concreteness { return Type } + +// Expr + +func (*StructLit) expr() {} +func (*ListLit) expr() {} +func (*DisjunctionExpr) expr() {} + +// TODO: also allow? +// a: b: if cond {} +// +// It is unclear here, though, whether field `a` should be added +// unconditionally. 
+// func (*Comprehension) expr() {} + +// Expr and Value + +func (*Bottom) expr() {} +func (*Null) expr() {} +func (*Bool) expr() {} +func (*Num) expr() {} +func (*String) expr() {} +func (*Bytes) expr() {} +func (*Top) expr() {} +func (*BasicType) expr() {} +func (*Vertex) expr() {} +func (*ListMarker) expr() {} +func (*StructMarker) expr() {} +func (*Conjunction) expr() {} +func (*Disjunction) expr() {} +func (*BoundValue) expr() {} +func (*BuiltinValidator) expr() {} +func (*Builtin) expr() {} + +// Expr and Resolver + +func (*NodeLink) expr() {} +func (*FieldReference) expr() {} +func (*ValueReference) expr() {} +func (*LabelReference) expr() {} +func (*DynamicReference) expr() {} +func (*ImportReference) expr() {} +func (*LetReference) expr() {} + +// Expr and Evaluator + +func (*BoundExpr) expr() {} +func (*SelectorExpr) expr() {} +func (*IndexExpr) expr() {} +func (*SliceExpr) expr() {} +func (*Interpolation) expr() {} +func (*UnaryExpr) expr() {} +func (*BinaryExpr) expr() {} +func (*CallExpr) expr() {} + +// Decl and Expr (so allow attaching original source in Conjunct) + +func (*Field) declNode() {} +func (x *Field) expr() Expr { return x.Value } +func (*OptionalField) declNode() {} +func (x *OptionalField) expr() Expr { return x.Value } +func (*BulkOptionalField) declNode() {} +func (x *BulkOptionalField) expr() Expr { return x.Value } +func (*DynamicField) declNode() {} +func (x *DynamicField) expr() Expr { return x.Value } + +// Decl, Elem, and Expr (so allow attaching original source in Conjunct) + +func (*Ellipsis) elemNode() {} +func (*Ellipsis) declNode() {} +func (x *Ellipsis) expr() Expr { + if x.Value == nil { + return top + } + return x.Value +} + +var top = &Top{} + +// Decl and Yielder + +func (*LetClause) declNode() {} + +// Decl and Elem + +func (*StructLit) declNode() {} +func (*StructLit) elemNode() {} +func (*ListLit) declNode() {} +func (*ListLit) elemNode() {} +func (*Bottom) declNode() {} +func (*Bottom) elemNode() {} +func (*Null) 
declNode() {} +func (*Null) elemNode() {} +func (*Bool) declNode() {} +func (*Bool) elemNode() {} +func (*Num) declNode() {} +func (*Num) elemNode() {} +func (*String) declNode() {} +func (*String) elemNode() {} +func (*Bytes) declNode() {} +func (*Bytes) elemNode() {} +func (*Top) declNode() {} +func (*Top) elemNode() {} +func (*BasicType) declNode() {} +func (*BasicType) elemNode() {} +func (*BoundExpr) declNode() {} +func (*BoundExpr) elemNode() {} +func (*Vertex) declNode() {} +func (*Vertex) elemNode() {} +func (*ListMarker) declNode() {} +func (*ListMarker) elemNode() {} +func (*StructMarker) declNode() {} +func (*StructMarker) elemNode() {} +func (*Conjunction) declNode() {} +func (*Conjunction) elemNode() {} +func (*Disjunction) declNode() {} +func (*Disjunction) elemNode() {} +func (*BoundValue) declNode() {} +func (*BoundValue) elemNode() {} +func (*BuiltinValidator) declNode() {} +func (*BuiltinValidator) elemNode() {} +func (*NodeLink) declNode() {} +func (*NodeLink) elemNode() {} +func (*FieldReference) declNode() {} +func (*FieldReference) elemNode() {} +func (*ValueReference) declNode() {} +func (*ValueReference) elemNode() {} +func (*LabelReference) declNode() {} +func (*LabelReference) elemNode() {} +func (*DynamicReference) declNode() {} +func (*DynamicReference) elemNode() {} +func (*ImportReference) declNode() {} +func (*ImportReference) elemNode() {} +func (*LetReference) declNode() {} +func (*LetReference) elemNode() {} +func (*SelectorExpr) declNode() {} +func (*SelectorExpr) elemNode() {} +func (*IndexExpr) declNode() {} +func (*IndexExpr) elemNode() {} +func (*SliceExpr) declNode() {} +func (*SliceExpr) elemNode() {} +func (*Interpolation) declNode() {} +func (*Interpolation) elemNode() {} +func (*UnaryExpr) declNode() {} +func (*UnaryExpr) elemNode() {} +func (*BinaryExpr) declNode() {} +func (*BinaryExpr) elemNode() {} +func (*CallExpr) declNode() {} +func (*CallExpr) elemNode() {} +func (*Builtin) declNode() {} +func (*Builtin) 
elemNode() {} +func (*DisjunctionExpr) declNode() {} +func (*DisjunctionExpr) elemNode() {} + +// Decl, Elem, and Yielder + +func (*Comprehension) declNode() {} +func (*Comprehension) elemNode() {} + +// Node + +func (*Vertex) node() {} +func (*Conjunction) node() {} +func (*Disjunction) node() {} +func (*BoundValue) node() {} +func (*Builtin) node() {} +func (*BuiltinValidator) node() {} +func (*Bottom) node() {} +func (*Null) node() {} +func (*Bool) node() {} +func (*Num) node() {} +func (*String) node() {} +func (*Bytes) node() {} +func (*Top) node() {} +func (*BasicType) node() {} +func (*StructLit) node() {} +func (*ListLit) node() {} +func (*BoundExpr) node() {} +func (*NodeLink) node() {} +func (*FieldReference) node() {} +func (*ValueReference) node() {} +func (*LabelReference) node() {} +func (*DynamicReference) node() {} +func (*ImportReference) node() {} +func (*LetReference) node() {} +func (*SelectorExpr) node() {} +func (*IndexExpr) node() {} +func (*SliceExpr) node() {} +func (*Interpolation) node() {} +func (*UnaryExpr) node() {} +func (*BinaryExpr) node() {} +func (*CallExpr) node() {} +func (*DisjunctionExpr) node() {} +func (*Field) node() {} +func (*OptionalField) node() {} +func (*BulkOptionalField) node() {} +func (*DynamicField) node() {} +func (*Ellipsis) node() {} +func (*Comprehension) node() {} +func (*ForClause) node() {} +func (*IfClause) node() {} +func (*LetClause) node() {} +func (*ValueClause) node() {} diff --git a/vendor/cuelang.org/go/internal/core/adt/binop.go b/vendor/cuelang.org/go/internal/core/adt/binop.go new file mode 100644 index 0000000000..e2410a1e8f --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/binop.go @@ -0,0 +1,321 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "bytes" + "strings" +) + +// BinOp handles all operations except AndOp and OrOp. This includes processing +// unary comparators such as '<4' and '=~"foo"'. +// +// BinOp returns nil if not both left and right are concrete. +func BinOp(c *OpContext, op Op, left, right Value) Value { + leftKind := left.Kind() + rightKind := right.Kind() + + const msg = "non-concrete value '%v' to operation '%s'" + if left.Concreteness() > Concrete { + return &Bottom{ + Code: IncompleteError, + Err: c.Newf(msg, left, op), + } + } + if right.Concreteness() > Concrete { + return &Bottom{ + Code: IncompleteError, + Err: c.Newf(msg, right, op), + } + } + + if err := CombineErrors(c.src, left, right); err != nil { + return err + } + + switch op { + case EqualOp: + switch { + case leftKind == NullKind && rightKind == NullKind: + return c.newBool(true) + + case leftKind == NullKind || rightKind == NullKind: + return c.newBool(false) + + case leftKind == BoolKind: + return c.newBool(c.BoolValue(left) == c.BoolValue(right)) + + case leftKind == StringKind: + // normalize? 
+ return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right))) + + case leftKind == BytesKind: + return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op))) + + case leftKind&NumKind != 0 && rightKind&NumKind != 0: + // n := c.newNum() + return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X)) + + case leftKind == ListKind && rightKind == ListKind: + x := c.Elems(left) + y := c.Elems(right) + if len(x) != len(y) { + return c.newBool(false) + } + for i, e := range x { + a, _ := c.Concrete(nil, e, op) + b, _ := c.Concrete(nil, y[i], op) + if !test(c, EqualOp, a, b) { + return c.newBool(false) + } + } + return c.newBool(true) + } + + case NotEqualOp: + switch { + case leftKind == NullKind && rightKind == NullKind: + return c.newBool(false) + + case leftKind == NullKind || rightKind == NullKind: + return c.newBool(true) + + case leftKind == BoolKind: + return c.newBool(c.boolValue(left, op) != c.boolValue(right, op)) + + case leftKind == StringKind: + // normalize? + return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right))) + + case leftKind == BytesKind: + return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op))) + + case leftKind&NumKind != 0 && rightKind&NumKind != 0: + // n := c.newNum() + return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X)) + + case leftKind == ListKind && rightKind == ListKind: + x := c.Elems(left) + y := c.Elems(right) + if len(x) != len(y) { + return c.newBool(false) + } + for i, e := range x { + a, _ := c.Concrete(nil, e, op) + b, _ := c.Concrete(nil, y[i], op) + if !test(c, EqualOp, a, b) { + return c.newBool(true) + } + } + return c.newBool(false) + } + + case LessThanOp, LessEqualOp, GreaterEqualOp, GreaterThanOp: + switch { + case leftKind == StringKind && rightKind == StringKind: + // normalize? 
+ return cmpTonode(c, op, strings.Compare(c.stringValue(left, op), c.stringValue(right, op))) + + case leftKind == BytesKind && rightKind == BytesKind: + return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op))) + + case leftKind&NumKind != 0 && rightKind&NumKind != 0: + // n := c.newNum(left, right) + return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X)) + } + + case BoolAndOp: + return c.newBool(c.boolValue(left, op) && c.boolValue(right, op)) + + case BoolOrOp: + return c.newBool(c.boolValue(left, op) || c.boolValue(right, op)) + + case MatchOp: + // if y.re == nil { + // // This really should not happen, but leave in for safety. + // b, err := Regexp.MatchString(str, x.str) + // if err != nil { + // return c.Errf(Src, "error parsing Regexp: %v", err) + // } + // return boolTonode(Src, b) + // } + return c.newBool(c.regexp(right).MatchString(c.stringValue(left, op))) + + case NotMatchOp: + return c.newBool(!c.regexp(right).MatchString(c.stringValue(left, op))) + + case AddOp: + switch { + case leftKind&NumKind != 0 && rightKind&NumKind != 0: + return c.Add(c.Num(left, op), c.Num(right, op)) + + case leftKind == StringKind && rightKind == StringKind: + return c.NewString(c.StringValue(left) + c.StringValue(right)) + + case leftKind == BytesKind && rightKind == BytesKind: + ba := c.bytesValue(left, op) + bb := c.bytesValue(right, op) + b := make([]byte, len(ba)+len(bb)) + copy(b, ba) + copy(b[len(ba):], bb) + return c.newBytes(b) + + case leftKind == ListKind && rightKind == ListKind: + // TODO: get rid of list addition. Semantically it is somewhat + // unclear and, as it turns out, it is also hard to get right. + // Simulate addition with comprehensions now. 
+ if err := c.Err(); err != nil { + return err + } + + x := MakeIdentLabel(c, "x", "") + + forClause := func(src Expr) *Comprehension { + s := &StructLit{Decls: []Decl{ + &FieldReference{UpCount: 1, Label: x}, + }} + return &Comprehension{ + Clauses: &ForClause{ + Value: x, + Src: src, + Dst: &ValueClause{s}, + }, + Value: s, + } + } + + list := &ListLit{ + Elems: []Elem{ + forClause(left), + forClause(right), + }, + } + + n := &Vertex{} + n.AddConjunct(MakeRootConjunct(c.Env(0), list)) + n.Finalize(c) + + return n + } + + case SubtractOp: + return c.Sub(c.Num(left, op), c.Num(right, op)) + + case MultiplyOp: + switch { + // float + case leftKind&NumKind != 0 && rightKind&NumKind != 0: + return c.Mul(c.Num(left, op), c.Num(right, op)) + + case leftKind == StringKind && rightKind == IntKind: + const as = "string multiplication" + return c.NewString(strings.Repeat(c.stringValue(left, as), int(c.uint64(right, as)))) + + case leftKind == IntKind && rightKind == StringKind: + const as = "string multiplication" + return c.NewString(strings.Repeat(c.stringValue(right, as), int(c.uint64(left, as)))) + + case leftKind == BytesKind && rightKind == IntKind: + const as = "bytes multiplication" + return c.newBytes(bytes.Repeat(c.bytesValue(left, as), int(c.uint64(right, as)))) + + case leftKind == IntKind && rightKind == BytesKind: + const as = "bytes multiplication" + return c.newBytes(bytes.Repeat(c.bytesValue(right, as), int(c.uint64(left, as)))) + + case leftKind == ListKind && rightKind == IntKind: + left, right = right, left + fallthrough + + case leftKind == IntKind && rightKind == ListKind: + // TODO: get rid of list multiplication. 
+ + list := &ListLit{} + x := MakeIdentLabel(c, "x", "") + + for i := c.uint64(left, "list multiplier"); i > 0; i-- { + st := &StructLit{Decls: []Decl{ + &FieldReference{UpCount: 1, Label: x}, + }} + list.Elems = append(list.Elems, + &Comprehension{ + Clauses: &ForClause{ + Value: x, + Src: right, + Dst: &ValueClause{st}, + }, + Value: st, + }, + ) + } + if err := c.Err(); err != nil { + return err + } + + n := &Vertex{} + n.AddConjunct(MakeRootConjunct(c.Env(0), list)) + n.Finalize(c) + + return n + } + + case FloatQuotientOp: + if leftKind&NumKind != 0 && rightKind&NumKind != 0 { + return c.Quo(c.Num(left, op), c.Num(right, op)) + } + + case IntDivideOp: + if leftKind&IntKind != 0 && rightKind&IntKind != 0 { + return c.IntDiv(c.Num(left, op), c.Num(right, op)) + } + + case IntModuloOp: + if leftKind&IntKind != 0 && rightKind&IntKind != 0 { + return c.IntMod(c.Num(left, op), c.Num(right, op)) + } + + case IntQuotientOp: + if leftKind&IntKind != 0 && rightKind&IntKind != 0 { + return c.IntQuo(c.Num(left, op), c.Num(right, op)) + } + + case IntRemainderOp: + if leftKind&IntKind != 0 && rightKind&IntKind != 0 { + return c.IntRem(c.Num(left, op), c.Num(right, op)) + } + } + + return c.NewErrf("invalid operands %s and %s to '%s' (type %s and %s)", + left, right, op, left.Kind(), right.Kind()) +} + +func cmpTonode(c *OpContext, op Op, r int) Value { + result := false + switch op { + case LessThanOp: + result = r == -1 + case LessEqualOp: + result = r != 1 + case EqualOp, AndOp: + result = r == 0 + case NotEqualOp: + result = r != 0 + case GreaterEqualOp: + result = r != -1 + case GreaterThanOp: + result = r == 1 + } + return c.newBool(result) +} diff --git a/vendor/cuelang.org/go/internal/core/adt/closed.go b/vendor/cuelang.org/go/internal/core/adt/closed.go new file mode 100644 index 0000000000..34a3e45e39 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/closed.go @@ -0,0 +1,518 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// This file implements the closedness algorithm. + +// Outline of algorithm +// +// To compute closedness each Vertex is associated with a tree which has +// leaf nodes with sets of allowed labels, and interior nodes that describe +// how these sets may be combines: Or, for embedding, or And for definitions. +// +// Each conjunct of a Vertex is associated with such a leaf node. Each +// conjunct that evaluates to a struct is added to the list of Structs, which +// in the end forms this tree. If a conjunct is embedded, or references another +// struct or definition, it adds interior node to reflect this. +// +// To test whether a feature is allowed, it must satisfy the resulting +// expression tree. +// +// In order to avoid having to copy the tree for each node, the tree is linked +// from leaf node to root, rather than the other way around. This allows +// parent nodes to be shared as the tree grows and ensures that the growth +// of the tree is bounded by the number of conjuncts. As a consequence, this +// requires a two-pass algorithm: +// +// - walk up to mark which nodes are required and count the number of +// child nodes that need to be satisfied. +// - verify fields in leaf structs and mark parent leafs as satisfied +// when appropriate. +// +// A label is allowed if all required root nodes are marked as accepted after +// these two passes. 
+// + +// A note on embeddings: it is important to keep track which conjuncts originate +// from an embedding, as an embedded value may eventually turn into a closed +// struct. Consider +// +// a: { +// b +// d: e: int +// } +// b: d: { +// #A & #B +// } +// +// At the point of evaluating `a`, the struct is not yet closed. However, +// descending into `d` will trigger the inclusion of definitions which in turn +// causes the struct to be closed. At this point, it is important to know that +// `b` originated from an embedding, as otherwise `e` may not be allowed. + +// TODO(perf): +// - less nodes +// - disable StructInfo nodes that can no longer pass a feature +// - sort StructInfos active ones first. + +// TODO(errors): return a dedicated ConflictError that can track original +// positions on demand. + +func (v *Vertex) IsInOneOf(t SpanType) bool { + for _, s := range v.Structs { + if s.CloseInfo.IsInOneOf(t) { + return true + } + } + return false +} + +// IsRecursivelyClosed returns true if this value is either a definition or unified +// with a definition. +func (v *Vertex) IsRecursivelyClosed() bool { + return v.Closed || v.IsInOneOf(DefinitionSpan) +} + +type closeNodeType uint8 + +const ( + // a closeRef node is created when there is a non-definition reference. + // These nodes are not necessary for computing results, but may be + // relevant down the line to group closures through embedded values and + // to track position information for failures. + closeRef closeNodeType = iota + + // closeDef indicates this node was introduced as a result of referencing + // a definition. + closeDef + + // closeEmbed indicates this node was added as a result of an embedding. + closeEmbed + + _ = closeRef // silence the linter +) + +// TODO: merge with closeInfo: this is a leftover of the refactoring. 
+type CloseInfo struct { + *closeInfo + + IsClosed bool + FieldTypes OptionalType +} + +func (c CloseInfo) Location() Node { + if c.closeInfo == nil { + return nil + } + return c.closeInfo.location +} + +func (c CloseInfo) SpanMask() SpanType { + if c.closeInfo == nil { + return 0 + } + return c.span +} + +func (c CloseInfo) RootSpanType() SpanType { + if c.closeInfo == nil { + return 0 + } + return c.root +} + +func (c CloseInfo) IsInOneOf(t SpanType) bool { + if c.closeInfo == nil { + return false + } + return c.span&t != 0 +} + +// TODO(perf): remove: error positions should always be computed on demand +// in dedicated error types. +func (c *CloseInfo) AddPositions(ctx *OpContext) { + for s := c.closeInfo; s != nil; s = s.parent { + if loc := s.location; loc != nil { + ctx.AddPosition(loc) + } + } +} + +// TODO(perf): use on StructInfo. Then if parent and expression are the same +// it is possible to use cached value. +func (c CloseInfo) SpawnEmbed(x Expr) CloseInfo { + var span SpanType + if c.closeInfo != nil { + span = c.span + } + + c.closeInfo = &closeInfo{ + parent: c.closeInfo, + location: x, + mode: closeEmbed, + root: EmbeddingSpan, + span: span | EmbeddingSpan, + } + return c +} + +// SpawnGroup is used for structs that contain embeddings that may end up +// closing the struct. This is to force that `b` is not allowed in +// +// a: {#foo} & {b: int} +// +func (c CloseInfo) SpawnGroup(x Expr) CloseInfo { + var span SpanType + if c.closeInfo != nil { + span = c.span + } + c.closeInfo = &closeInfo{ + parent: c.closeInfo, + location: x, + span: span, + } + return c +} + +// SpawnSpan is used to track that a value is introduced by a comprehension +// or constraint. Definition and embedding spans are introduced with SpawnRef +// and SpawnEmbed, respectively. 
+func (c CloseInfo) SpawnSpan(x Node, t SpanType) CloseInfo { + var span SpanType + if c.closeInfo != nil { + span = c.span + } + c.closeInfo = &closeInfo{ + parent: c.closeInfo, + location: x, + root: t, + span: span | t, + } + return c +} + +func (c CloseInfo) SpawnRef(arc *Vertex, isDef bool, x Expr) CloseInfo { + var span SpanType + if c.closeInfo != nil { + span = c.span + } + c.closeInfo = &closeInfo{ + parent: c.closeInfo, + location: x, + span: span, + } + if isDef { + c.mode = closeDef + c.closeInfo.root = DefinitionSpan + c.closeInfo.span |= DefinitionSpan + } + return c +} + +// isDef reports whether an expressions is a reference that references a +// definition anywhere in its selection path. +// +// TODO(performance): this should be merged with resolve(). But for now keeping +// this code isolated makes it easier to see what it is for. +func IsDef(x Expr) bool { + switch r := x.(type) { + case *FieldReference: + return r.Label.IsDef() + + case *SelectorExpr: + if r.Sel.IsDef() { + return true + } + return IsDef(r.X) + + case *IndexExpr: + return IsDef(r.X) + } + return false +} + +// A SpanType is used to indicate whether a CUE value is within the scope of +// a certain CUE language construct, the span type. +type SpanType uint8 + +const ( + // EmbeddingSpan means that this value was embedded at some point and should + // not be included as a possible root node in the todo field of OpContext. + EmbeddingSpan SpanType = 1 << iota + ConstraintSpan + ComprehensionSpan + DefinitionSpan +) + +type closeInfo struct { + // location records the expression that led to this node's introduction. + location Node + + // The parent node in the tree. + parent *closeInfo + + // TODO(performance): if references are chained, we could have a separate + // parent pointer to skip the chain. + + // mode indicates whether this node was added as part of an embedding, + // definition or non-definition reference. 
+ mode closeNodeType + + // noCheck means this struct is irrelevant for closedness checking. This can + // happen when: + // - it is a sibling of a new definition. + noCheck bool // don't process for inclusion info + + root SpanType + span SpanType +} + +// closeStats holds the administrative fields for a closeInfo value. Each +// closeInfo is associated with a single closeStats value per unification +// operator. This association is done through an OpContext. This allows the +// same value to be used in multiple concurrent unification operations. +// NOTE: there are other parts of the algorithm that are not thread-safe yet. +type closeStats struct { + // the other fields of this closeStats value are only valid if generation + // is equal to the generation in OpContext. This allows for lazy + // initialization of closeStats. + generation int + + // These counts keep track of how many required child nodes need to be + // completed before this node is accepted. + requiredCount int + acceptedCount int + + // accepted is set if this node is accepted. + accepted bool + + required bool + next *closeStats +} + +func (c *closeInfo) isClosed() bool { + return c.mode == closeDef +} + +func isClosed(v *Vertex) bool { + for _, s := range v.Structs { + if s.IsClosed { + return true + } + for c := s.closeInfo; c != nil; c = c.parent { + if c.isClosed() { + return true + } + } + } + return false +} + +// Accept determines whether f is allowed in n. It uses the OpContext for +// caching administrative fields. +func Accept(ctx *OpContext, n *Vertex, f Feature) (found, required bool) { + ctx.generation++ + ctx.todo = nil + + var optionalTypes OptionalType + + // TODO(perf): more aggressively determine whether a struct is open or + // closed: open structs do not have to be checked, yet they can particularly + // be the ones with performance isssues, for instanced as a result of + // embedded for comprehensions. 
+ for _, s := range n.Structs { + if !s.useForAccept() { + continue + } + markCounts(ctx, s.CloseInfo) + optionalTypes |= s.types + } + + var str Value + if f.Index() == MaxIndex { + f &= fTypeMask + } else if optionalTypes&(HasComplexPattern|HasDynamic) != 0 && f.IsString() { + str = f.ToValue(ctx) + } + + for _, s := range n.Structs { + if !s.useForAccept() { + continue + } + if verifyArc(ctx, s, f, str) { + // Beware: don't add to below expression: this relies on the + // side effects of markUp. + ok := markUp(ctx, s.closeInfo, 0) + found = found || ok + } + } + + // Reject if any of the roots is not accepted. + for x := ctx.todo; x != nil; x = x.next { + if !x.accepted { + return false, true + } + } + + return found, ctx.todo != nil +} + +func markCounts(ctx *OpContext, info CloseInfo) { + if info.IsClosed { + markRequired(ctx, info.closeInfo) + return + } + for s := info.closeInfo; s != nil; s = s.parent { + if s.isClosed() { + markRequired(ctx, s) + return + } + } +} + +func markRequired(ctx *OpContext, info *closeInfo) { + count := 0 + for ; ; info = info.parent { + var s closeInfo + if info != nil { + s = *info + } + + x := getScratch(ctx, info) + + x.requiredCount += count + + if x.required { + return + } + + if s.span&EmbeddingSpan == 0 { + x.next = ctx.todo + ctx.todo = x + } + + x.required = true + + if info == nil { + return + } + + count = 0 + if s.mode != closeEmbed { + count = 1 + } + } +} + +func markUp(ctx *OpContext, info *closeInfo, count int) bool { + for ; ; info = info.parent { + var s closeInfo + if info != nil { + s = *info + } + + x := getScratch(ctx, info) + + x.acceptedCount += count + + if x.acceptedCount < x.requiredCount { + return false + } + + x.accepted = true + + if info == nil { + return true + } + + count = 0 + if x.required && s.mode != closeEmbed { + count = 1 + } + } +} + +// getScratch: explain generation. 
+func getScratch(ctx *OpContext, s *closeInfo) *closeStats { + m := ctx.closed + if m == nil { + m = map[*closeInfo]*closeStats{} + ctx.closed = m + } + + x := m[s] + if x == nil { + x = &closeStats{} + m[s] = x + } + + if x.generation != ctx.generation { + *x = closeStats{generation: ctx.generation} + } + + return x +} + +func verifyArc(ctx *OpContext, s *StructInfo, f Feature, label Value) bool { + isRegular := f.IsString() + + o := s.StructLit + env := s.Env + + if isRegular && (len(o.Additional) > 0 || o.IsOpen) { + return true + } + + for _, g := range o.Fields { + if f == g.Label { + return true + } + } + + if !isRegular { + return false + } + + // Do not record errors during this validation. + errs := ctx.errs + defer func() { ctx.errs = errs }() + + if len(o.Dynamic) > 0 && f.IsString() && label != nil { + for _, b := range o.Dynamic { + v := env.evalCached(ctx, b.Key) + s, ok := Unwrap(v).(*String) + if !ok { + continue + } + if label.(*String).Str == s.Str { + return true + } + } + } + + for _, b := range o.Bulk { + if matchBulk(ctx, env, b, f, label) { + return true + } + } + + // TODO(perf): delay adding this position: create a special error type that + // computes all necessary positions on demand. + if ctx != nil { + ctx.AddPosition(s.StructLit) + } + + return false +} diff --git a/vendor/cuelang.org/go/internal/core/adt/closed2.go b/vendor/cuelang.org/go/internal/core/adt/closed2.go new file mode 100644 index 0000000000..ada0342d2d --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/closed2.go @@ -0,0 +1,68 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// CloseDef defines how individual fieldSets (corresponding to conjuncts) +// combine to determine whether a field is contained in a closed set. +// +// A CloseDef combines multiple conjuncts and embeddings. All CloseDefs are +// stored in slice. References to other CloseDefs are indices within this slice. +// Together they define the top of the tree of the expression tree of how +// conjuncts combine together (a canopy). + +// isComplexStruct reports whether the Closed information should be copied as a +// subtree into the parent node using InsertSubtree. If not, the conjuncts can +// just be inserted at the current ID. +func isComplexStruct(ctx *OpContext, v *Vertex) bool { + return v.IsClosedStruct() +} + +// TODO: cleanup code and error messages. Reduce duplication in some related +// code. +func verifyArc2(ctx *OpContext, f Feature, v *Vertex, isClosed bool) (found bool, err *Bottom) { + // Don't check computed, temporary vertices. + if v.Label == InvalidLabel { + return true, nil + } + + // TODO(perf): collect positions in error. + defer ctx.ReleasePositions(ctx.MarkPositions()) + + // Note: it is okay to use parent here as this only needs to be computed + // for the original location. 
+ if ok, required := Accept(ctx, v.Parent, f); ok || (!required && !isClosed) { + return true, nil + } + + if !f.IsString() { + // if f.IsHidden() { Also change Accept in composite.go + return false, nil + } + + if v != nil { + for _, c := range v.Conjuncts { + if pos := c.Field(); pos != nil { + ctx.AddPosition(pos) + } + } + } + + for _, s := range v.Parent.Structs { + s.AddPositions(ctx) + } + + label := f.SelectorString(ctx) + return false, ctx.NewErrf("field not allowed: %s", label) +} diff --git a/vendor/cuelang.org/go/internal/core/adt/composite.go b/vendor/cuelang.org/go/internal/core/adt/composite.go new file mode 100644 index 0000000000..603abf2386 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/composite.go @@ -0,0 +1,838 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "fmt" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// TODO: unanswered questions about structural cycles: +// +// 1. When detecting a structural cycle, should we consider this as: +// a) an unevaluated value, +// b) an incomplete error (which does not affect parent validity), or +// c) a special value. +// +// Making it an error is the simplest way to ensure reentrancy is disallowed: +// without an error it would require an additional mechanism to stop reentrancy +// from continuing to process. 
Even worse, in some cases it may only partially +// evaluate, resulting in unexpected results. For this reason, we are taking +// approach `b` for now. +// +// This has some consequences of how disjunctions are treated though. Consider +// +// list: { +// head: _ +// tail: list | null +// } +// +// When making it an error, evaluating the above will result in +// +// list: { +// head: _ +// tail: null +// } +// +// because list will result in a structural cycle, and thus an error, it will be +// stripped from the disjunction. This may or may not be a desirable property. A +// nice thing is that it is not required to write `list | *null`. A disadvantage +// is that this is perhaps somewhat inexplicit. +// +// When not making it an error (and simply cease evaluating child arcs upon +// cycle detection), the result would be: +// +// list: { +// head: _ +// tail: list | null +// } +// +// In other words, an evaluation would result in a cycle and thus an error. +// Implementations can recognize such cases by having unevaluated arcs. An +// explicit structure cycle marker would probably be less error prone. +// +// Note that in both cases, a reference to list will still use the original +// conjuncts, so the result will be the same for either method in this case. +// +// +// 2. Structural cycle allowance. +// +// Structural cycle detection disallows reentrancy as well. This means one +// cannot use structs for recursive computation. This will probably preclude +// evaluation of some configuration. Given that there is no real alternative +// yet, we could allow structural cycle detection to be optionally disabled. + +// An Environment links the parent scopes for identifier lookup to a composite +// node. Each conjunct that make up node in the tree can be associated with +// a different environment (although some conjuncts may share an Environment). 
+type Environment struct { + Up *Environment + Vertex *Vertex + + // DynamicLabel is only set when instantiating a field from a pattern + // constraint. It is used to resolve label references. + DynamicLabel Feature + + // TODO(perf): make the following public fields a shareable struct as it + // mostly is going to be the same for child nodes. + + // Cyclic indicates a structural cycle was detected for this conjunct or one + // of its ancestors. + Cyclic bool + + // Deref keeps track of nodes that should dereference to Vertex. It is used + // for detecting structural cycle. + // + // The detection algorithm is based on Tomabechi's quasi-destructive graph + // unification. This detection requires dependencies to be resolved into + // fully dereferenced vertices. This is not the case in our algorithm: + // the result of evaluating conjuncts is placed into dereferenced vertices + // _after_ they are evaluated, but the Environment still points to the + // non-dereferenced context. + // + // In order to be able to detect structural cycles, we need to ensure that + // at least one node that is part of a cycle in the context in which + // conjunctions are evaluated dereferences correctly. + // + // The only field necessary to detect a structural cycle, however, is + // the Status field of the Vertex. So rather than dereferencing a node + // proper, it is sufficient to copy the Status of the dereferenced nodes + // to these nodes (will always be EvaluatingArcs). + Deref []*Vertex + + // Cycles contains vertices for which cycles are detected. It is used + // for tracking self-references within structural cycles. + // + // Unlike Deref, Cycles is not incremented with child nodes. + // TODO: Cycles is always a tail end of Deref, so this can be optimized. + Cycles []*Vertex + + cache map[Expr]Value +} + +type ID int32 + +// evalCached is used to look up let expressions. Caching let expressions +// prevents a possible combinatorial explosion. 
+func (e *Environment) evalCached(c *OpContext, x Expr) Value { + if v, ok := x.(Value); ok { + return v + } + v, ok := e.cache[x] + if !ok { + if e.cache == nil { + e.cache = map[Expr]Value{} + } + env, src := c.e, c.src + c.e, c.src = e, x.Source() + v = c.evalState(x, Partial) // TODO: should this be Finalized? + c.e, c.src = env, src + if b, ok := v.(*Bottom); !ok || !b.IsIncomplete() { + e.cache[x] = v + } + } + return v +} + +// A Vertex is a node in the value tree. It may be a leaf or internal node. +// It may have arcs to represent elements of a fully evaluated struct or list. +// +// For structs, it only contains definitions and concrete fields. +// optional fields are dropped. +// +// It maintains source information such as a list of conjuncts that contributed +// to the value. +type Vertex struct { + // Parent links to a parent Vertex. This parent should only be used to + // access the parent's Label field to find the relative location within a + // tree. + Parent *Vertex + + // Label is the feature leading to this vertex. + Label Feature + + // State: + // eval: nil, BaseValue: nil -- unevaluated + // eval: *, BaseValue: nil -- evaluating + // eval: *, BaseValue: * -- finalized + // + state *nodeContext + // TODO: move the following status fields to nodeContext. + + // status indicates the evaluation progress of this vertex. + status VertexStatus + + // isData indicates that this Vertex is to be interepreted as data: pattern + // and additional constraints, as well as optional fields, should be + // ignored. + isData bool + Closed bool + nonMonotonicReject bool + nonMonotonicInsertGen int32 + nonMonotonicLookupGen int32 + + // EvalCount keeps track of temporary dereferencing during evaluation. + // If EvalCount > 0, status should be considered to be EvaluatingArcs. + EvalCount int32 + + // SelfCount is used for tracking self-references. + SelfCount int32 + + // BaseValue is the value associated with this vertex. 
For lists and structs + // this is a sentinel value indicating its kind. + BaseValue BaseValue + + // ChildErrors is the collection of all errors of children. + ChildErrors *Bottom + + // The parent of nodes can be followed to determine the path within the + // configuration of this node. + // Value Value + Arcs []*Vertex // arcs are sorted in display order. + + // Conjuncts lists the structs that ultimately formed this Composite value. + // This includes all selected disjuncts. + // + // This value may be nil, in which case the Arcs are considered to define + // the final value of this Vertex. + Conjuncts []Conjunct + + // Structs is a slice of struct literals that contributed to this value. + // This information is used to compute the topological sort of arcs. + Structs []*StructInfo +} + +func (v *Vertex) Clone() *Vertex { + c := *v + c.state = nil + return &c +} + +type StructInfo struct { + *StructLit + + Env *Environment + + CloseInfo + + // Embed indicates the struct in which this struct is embedded (originally), + // or nil if this is a root structure. + // Embed *StructInfo + // Context *RefInfo // the location from which this struct originates. + Disable bool + + Embedding bool +} + +// TODO(perf): this could be much more aggressive for eliminating structs that +// are immaterial for closing. +func (s *StructInfo) useForAccept() bool { + if c := s.closeInfo; c != nil { + return !c.noCheck + } + return true +} + +// VertexStatus indicates the evaluation progress of a Vertex. +type VertexStatus int8 + +const ( + // Unprocessed indicates a Vertex has not been processed before. + // Value must be nil. + Unprocessed VertexStatus = iota + + // Evaluating means that the current Vertex is being evaluated. If this is + // encountered it indicates a reference cycle. Value must be nil. + Evaluating + + // Partial indicates that the result was only partially evaluated. It will + // need to be fully evaluated to get a complete results. 
+ // + // TODO: this currently requires a renewed computation. Cache the + // nodeContext to allow reusing the computations done so far. + Partial + + // AllArcs is request only. It must be past Partial, but + // before recursively resolving arcs. + AllArcs + + // EvaluatingArcs indicates that the arcs of the Vertex are currently being + // evaluated. If this is encountered it indicates a structural cycle. + // Value does not have to be nil + EvaluatingArcs + + // Finalized means that this node is fully evaluated and that the results + // are save to use without further consideration. + Finalized +) + +func (s VertexStatus) String() string { + switch s { + case Unprocessed: + return "unprocessed" + case Evaluating: + return "evaluating" + case Partial: + return "partial" + case AllArcs: + return "allarcs" + case EvaluatingArcs: + return "evaluatingArcs" + case Finalized: + return "finalized" + default: + return "unknown" + } +} + +func (v *Vertex) Status() VertexStatus { + if v.EvalCount > 0 { + return EvaluatingArcs + } + return v.status +} + +func (v *Vertex) UpdateStatus(s VertexStatus) { + Assertf(v.status <= s+1, "attempt to regress status from %d to %d", v.Status(), s) + + if s == Finalized && v.BaseValue == nil { + // panic("not finalized") + } + v.status = s +} + +// Value returns the Value of v without definitions if it is a scalar +// or itself otherwise. +func (v *Vertex) Value() Value { + switch x := v.BaseValue.(type) { + case nil: + return nil + case *StructMarker, *ListMarker: + return v + case Value: + // TODO: recursively descend into Vertex? + return x + default: + panic(fmt.Sprintf("unexpected type %T", v.BaseValue)) + } +} + +// isUndefined reports whether a vertex does not have a useable BaseValue yet. 
+func (v *Vertex) isUndefined() bool { + switch v.BaseValue { + case nil, cycle: + return true + } + return false +} + +func (x *Vertex) IsConcrete() bool { + return x.Concreteness() <= Concrete +} + +// IsData reports whether v should be interpreted in data mode. In other words, +// it tells whether optional field matching and non-regular fields, like +// definitions and hidden fields, should be ignored. +func (v *Vertex) IsData() bool { + return v.isData || len(v.Conjuncts) == 0 +} + +// ToDataSingle creates a new Vertex that represents just the regular fields +// of this vertex. Arcs are left untouched. +// It is used by cue.Eval to convert nodes to data on per-node basis. +func (v *Vertex) ToDataSingle() *Vertex { + w := *v + w.isData = true + w.state = nil + w.status = Finalized + return &w +} + +// ToDataAll returns a new v where v and all its descendents contain only +// the regular fields. +func (v *Vertex) ToDataAll() *Vertex { + arcs := make([]*Vertex, 0, len(v.Arcs)) + for _, a := range v.Arcs { + if a.Label.IsRegular() { + arcs = append(arcs, a.ToDataAll()) + } + } + w := *v + w.state = nil + w.status = Finalized + + w.BaseValue = toDataAll(w.BaseValue) + w.Arcs = arcs + w.isData = true + w.Conjuncts = make([]Conjunct, len(v.Conjuncts)) + // TODO(perf): this is not strictly necessary for evaluation, but it can + // hurt performance greatly. Drawback is that it may disable ordering. + for _, s := range w.Structs { + s.Disable = true + } + copy(w.Conjuncts, v.Conjuncts) + for i, c := range w.Conjuncts { + if v, _ := c.x.(Value); v != nil { + w.Conjuncts[i].x = toDataAll(v).(Value) + } + } + return &w +} + +func toDataAll(v BaseValue) BaseValue { + switch x := v.(type) { + default: + return x + + case *Vertex: + return x.ToDataAll() + + // The following cases are always erroneous, but we handle them anyway + // to avoid issues with the closedness algorithm down the line. 
+ case *Disjunction: + d := *x + d.Values = make([]*Vertex, len(x.Values)) + for i, v := range x.Values { + d.Values[i] = v.ToDataAll() + } + return &d + + case *Conjunction: + c := *x + c.Values = make([]Value, len(x.Values)) + for i, v := range x.Values { + // This case is okay because the source is of type Value. + c.Values[i] = toDataAll(v).(Value) + } + return &c + } +} + +// func (v *Vertex) IsEvaluating() bool { +// return v.Value == cycle +// } + +func (v *Vertex) IsErr() bool { + // if v.Status() > Evaluating { + if _, ok := v.BaseValue.(*Bottom); ok { + return true + } + // } + return false +} + +func (v *Vertex) Err(c *OpContext, state VertexStatus) *Bottom { + c.Unify(v, state) + if b, ok := v.BaseValue.(*Bottom); ok { + return b + } + return nil +} + +// func (v *Vertex) Evaluate() + +func (v *Vertex) Finalize(c *OpContext) { + // Saving and restoring the error context prevents v from panicking in + // case the caller did not handle existing errors in the context. + err := c.errs + c.errs = nil + c.Unify(v, Finalized) + c.errs = err +} + +func (v *Vertex) AddErr(ctx *OpContext, b *Bottom) { + v.SetValue(ctx, Finalized, CombineErrors(nil, v.Value(), b)) +} + +func (v *Vertex) SetValue(ctx *OpContext, state VertexStatus, value BaseValue) *Bottom { + v.BaseValue = value + v.UpdateStatus(state) + return nil +} + +// ToVertex wraps v in a new Vertex, if necessary. +func ToVertex(v Value) *Vertex { + switch x := v.(type) { + case *Vertex: + return x + default: + n := &Vertex{ + status: Finalized, + BaseValue: x, + } + n.AddConjunct(MakeRootConjunct(nil, v)) + return n + } +} + +// Unwrap returns the possibly non-concrete scalar value of v or nil if v is +// a list, struct or of undefined type. 
+func Unwrap(v Value) Value { + x, ok := v.(*Vertex) + if !ok { + return v + } + x = x.Indirect() + if n := x.state; n != nil && isCyclePlaceholder(x.BaseValue) { + if n.errs != nil && !n.errs.IsIncomplete() { + return n.errs + } + if n.scalar != nil { + return n.scalar + } + } + return x.Value() +} + +// Indirect unrolls indirections of Vertex values. These may be introduced, +// for instance, by temporary bindings such as comprehension values. +// It returns v itself if v does not point to another Vertex. +func (v *Vertex) Indirect() *Vertex { + for { + arc, ok := v.BaseValue.(*Vertex) + if !ok { + return v + } + v = arc + } +} + +// OptionalType is a bit field of the type of optional constraints in use by an +// Acceptor. +type OptionalType int8 + +const ( + HasField OptionalType = 1 << iota // X: T + HasDynamic // (X): T or "\(X)": T + HasPattern // [X]: T + HasComplexPattern // anything but a basic type + HasAdditional // ...T + IsOpen // Defined for all fields +) + +func (v *Vertex) Kind() Kind { + // This is possible when evaluating comprehensions. It is potentially + // not known at this time what the type is. + switch { + case v.state != nil: + return v.state.kind + case v.BaseValue == nil: + return TopKind + default: + return v.BaseValue.Kind() + } +} + +func (v *Vertex) OptionalTypes() OptionalType { + var mask OptionalType + for _, s := range v.Structs { + mask |= s.OptionalTypes() + } + return mask +} + +// IsOptional reports whether a field is explicitly defined as optional, +// as opposed to whether it is allowed by a pattern constraint. 
+func (v *Vertex) IsOptional(label Feature) bool { + for _, s := range v.Structs { + if s.IsOptional(label) { + return true + } + } + return false +} + +func (v *Vertex) accepts(ok, required bool) bool { + return ok || (!required && !v.Closed) +} + +func (v *Vertex) IsClosedStruct() bool { + switch x := v.BaseValue.(type) { + default: + return false + + case *StructMarker: + if x.NeedClose { + return true + } + + case *Disjunction: + } + return v.Closed || isClosed(v) +} + +func (v *Vertex) IsClosedList() bool { + if x, ok := v.BaseValue.(*ListMarker); ok { + return !x.IsOpen + } + return false +} + +// TODO: return error instead of boolean? (or at least have version that does.) +func (v *Vertex) Accept(ctx *OpContext, f Feature) bool { + if x, ok := v.BaseValue.(*Disjunction); ok { + for _, v := range x.Values { + if v.Accept(ctx, f) { + return true + } + } + return false + } + + if f.IsInt() { + switch v.BaseValue.(type) { + case *ListMarker: + // TODO(perf): use precomputed length. + if f.Index() < len(v.Elems()) { + return true + } + return !v.IsClosedList() + + default: + return v.Kind()&ListKind != 0 + } + } + + if k := v.Kind(); k&StructKind == 0 && f.IsString() { + // If the value is bottom, we may not really know if this used to + // be a struct. + if k != BottomKind || len(v.Structs) == 0 { + return false + } + } + + if f.IsHidden() || !v.IsClosedStruct() || v.Lookup(f) != nil { + return true + } + + // TODO(perf): collect positions in error. + defer ctx.ReleasePositions(ctx.MarkPositions()) + + return v.accepts(Accept(ctx, v, f)) +} + +// MatchAndInsert finds the conjuncts for optional fields, pattern +// constraints, and additional constraints that match f and inserts them in +// arc. Use f is 0 to match all additional constraints only. +func (v *Vertex) MatchAndInsert(ctx *OpContext, arc *Vertex) { + if !v.Accept(ctx, arc.Label) { + return + } + + // Go backwards to simulate old implementation. 
+ for i := len(v.Structs) - 1; i >= 0; i-- { + s := v.Structs[i] + if s.Disable { + continue + } + s.MatchAndInsert(ctx, arc) + } +} + +func (v *Vertex) IsList() bool { + _, ok := v.BaseValue.(*ListMarker) + return ok +} + +// Lookup returns the Arc with label f if it exists or nil otherwise. +func (v *Vertex) Lookup(f Feature) *Vertex { + for _, a := range v.Arcs { + if a.Label == f { + a = a.Indirect() + return a + } + } + return nil +} + +// Elems returns the regular elements of a list. +func (v *Vertex) Elems() []*Vertex { + // TODO: add bookkeeping for where list arcs start and end. + a := make([]*Vertex, 0, len(v.Arcs)) + for _, x := range v.Arcs { + if x.Label.IsInt() { + a = append(a, x) + } + } + return a +} + +// GetArc returns a Vertex for the outgoing arc with label f. It creates and +// ads one if it doesn't yet exist. +func (v *Vertex) GetArc(c *OpContext, f Feature) (arc *Vertex, isNew bool) { + arc = v.Lookup(f) + if arc == nil { + for _, a := range v.state.usedArcs { + if a.Label == f { + arc = a + v.Arcs = append(v.Arcs, arc) + isNew = true + if c.nonMonotonicInsertNest > 0 { + a.nonMonotonicInsertGen = c.nonMonotonicGeneration + } + break + } + } + } + if arc == nil { + arc = &Vertex{Parent: v, Label: f} + v.Arcs = append(v.Arcs, arc) + isNew = true + if c.nonMonotonicInsertNest > 0 { + arc.nonMonotonicInsertGen = c.nonMonotonicGeneration + } + } + if c.nonMonotonicInsertNest == 0 { + arc.nonMonotonicInsertGen = 0 + } + return arc, isNew +} + +func (v *Vertex) Source() ast.Node { + if v != nil { + if b, ok := v.BaseValue.(Value); ok { + return b.Source() + } + } + return nil +} + +// AddConjunct adds the given Conjuncts to v if it doesn't already exist. +func (v *Vertex) AddConjunct(c Conjunct) *Bottom { + if v.BaseValue != nil { + // TODO: investigate why this happens at all. Removing it seems to + // change the order of fields in some cases. + // + // This is likely a bug in the evaluator and should not happen. 
+ return &Bottom{Err: errors.Newf(token.NoPos, "cannot add conjunct")} + } + v.addConjunct(c) + return nil +} + +func (v *Vertex) addConjunct(c Conjunct) { + for _, x := range v.Conjuncts { + if x == c { + return + } + } + v.Conjuncts = append(v.Conjuncts, c) +} + +func (v *Vertex) AddStruct(s *StructLit, env *Environment, ci CloseInfo) *StructInfo { + info := StructInfo{ + StructLit: s, + Env: env, + CloseInfo: ci, + } + for _, t := range v.Structs { + if *t == info { + return t + } + } + t := &info + v.Structs = append(v.Structs, t) + return t +} + +// Path computes the sequence of Features leading from the root to of the +// instance to this Vertex. +// +// NOTE: this is for debugging purposes only. +func (v *Vertex) Path() []Feature { + return appendPath(nil, v) +} + +func appendPath(a []Feature, v *Vertex) []Feature { + if v.Parent == nil { + return a + } + a = appendPath(a, v.Parent) + if v.Label != 0 { + // A Label may be 0 for programmatically inserted nodes. + a = append(a, v.Label) + } + return a +} + +// An Conjunct is an Environment-Expr pair. The Environment is the starting +// point for reference lookup for any reference contained in X. +type Conjunct struct { + Env *Environment + x Node + + // CloseInfo is a unique number that tracks a group of conjuncts that need + // belong to a single originating definition. + CloseInfo CloseInfo +} + +// TODO(perf): replace with composite literal if this helps performance. + +// MakeRootConjunct creates a conjunct from the given environment and node. +// It panics if x cannot be used as an expression. +func MakeRootConjunct(env *Environment, x Node) Conjunct { + return MakeConjunct(env, x, CloseInfo{}) +} + +func MakeConjunct(env *Environment, x Node, id CloseInfo) Conjunct { + if env == nil { + // TODO: better is to pass one. 
+ env = &Environment{} + } + switch x.(type) { + case Elem, interface{ expr() Expr }: + default: + panic(fmt.Sprintf("invalid Node type %T", x)) + } + return Conjunct{env, x, id} +} + +func (c *Conjunct) Source() ast.Node { + return c.x.Source() +} + +func (c *Conjunct) Field() Node { + return c.x +} + +// Elem retrieves the Elem form of the contained conjunct. +// If it is a Field, it will return the field value. +func (c *Conjunct) Elem() Elem { + switch x := c.x.(type) { + case interface{ expr() Expr }: + return x.expr() + case Elem: + return x + default: + panic("unreachable") + } +} + +// Expr retrieves the expression form of the contained conjunct. +// If it is a field or comprehension, it will return its associated value. +func (c *Conjunct) Expr() Expr { + switch x := c.x.(type) { + case Expr: + return x + // TODO: comprehension. + case interface{ expr() Expr }: + return x.expr() + default: + panic("unreachable") + } +} diff --git a/vendor/cuelang.org/go/internal/core/adt/comprehension.go b/vendor/cuelang.org/go/internal/core/adt/comprehension.go new file mode 100644 index 0000000000..804dc39579 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/comprehension.go @@ -0,0 +1,70 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adt + +type envYield struct { + comp *Comprehension + env *Environment + id CloseInfo + err *Bottom +} + +func (n *nodeContext) insertComprehension(env *Environment, x *Comprehension, ci CloseInfo) { + n.comprehensions = append(n.comprehensions, envYield{x, env, ci, nil}) +} + +// injectComprehensions evaluates and inserts comprehensions. +func (n *nodeContext) injectComprehensions(all *[]envYield) (progress bool) { + ctx := n.ctx + + k := 0 + for i := 0; i < len(*all); i++ { + d := (*all)[i] + + sa := []*Environment{} + f := func(env *Environment) { + sa = append(sa, env) + } + + if err := ctx.Yield(d.env, d.comp, f); err != nil { + if err.IsIncomplete() { + d.err = err + (*all)[k] = d + k++ + } else { + // continue to collect other errors. + n.addBottom(err) + } + continue + } + + if len(sa) == 0 { + continue + } + id := d.id.SpawnSpan(d.comp.Clauses, ComprehensionSpan) + + n.ctx.nonMonotonicInsertNest++ + for _, env := range sa { + n.addExprConjunct(Conjunct{env, d.comp.Value, id}) + } + n.ctx.nonMonotonicInsertNest-- + } + + progress = k < len(*all) + + *all = (*all)[:k] + + return progress +} diff --git a/vendor/cuelang.org/go/internal/core/adt/context.go b/vendor/cuelang.org/go/internal/core/adt/context.go new file mode 100644 index 0000000000..e7a6412beb --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/context.go @@ -0,0 +1,1283 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adt + +import ( + "fmt" + "log" + "os" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/cockroachdb/apd/v2" + "golang.org/x/text/encoding/unicode" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// Debug sets whether extra aggressive checking should be done. +// This should typically default to true for pre-releases and default to +// false otherwise. +var Debug bool = os.Getenv("CUE_DEBUG") != "0" + +// Verbosity sets the log level. There are currently only two levels: +// 0: no logging +// 1: logging +var Verbosity int + +// DebugSort specifies that arcs be sorted consistently between implementations. +// 0: default +// 1: sort by Feature: this should be consistent between implementations where +// there is no change in the compiler and indexing code. +// 2: alphabetical +var DebugSort int + +func DebugSortArcs(c *OpContext, n *Vertex) { + if n.IsList() { + return + } + switch a := n.Arcs; DebugSort { + case 1: + sort.SliceStable(a, func(i, j int) bool { + return a[i].Label < a[j].Label + }) + case 2: + sort.SliceStable(a, func(i, j int) bool { + return a[i].Label.SelectorString(c.Runtime) < + a[j].Label.SelectorString(c.Runtime) + }) + } +} + +func DebugSortFields(c *OpContext, a []Feature) { + switch DebugSort { + case 1: + sort.SliceStable(a, func(i, j int) bool { + return a[i] < a[j] + }) + case 2: + sort.SliceStable(a, func(i, j int) bool { + return a[i].SelectorString(c.Runtime) < + a[j].SelectorString(c.Runtime) + }) + } +} + +// Assert panics if the condition is false. Assert can be used to check for +// conditions that are considers to break an internal variant or unexpected +// condition, but that nonetheless probably will be handled correctly down the +// line. For instance, a faulty condition could lead to to error being caught +// down the road, but resulting in an inaccurate error message. In production +// code it is better to deal with the bad error message than to panic. 
+// +// It is advisable for each use of Assert to document how the error is expected +// to be handled down the line. +func Assertf(b bool, format string, args ...interface{}) { + if Debug && !b { + panic(fmt.Sprintf("assertion failed: "+format, args...)) + } +} + +// Assertf either panics or reports an error to c if the condition is not met. +func (c *OpContext) Assertf(pos token.Pos, b bool, format string, args ...interface{}) { + if !b { + if Debug { + panic(fmt.Sprintf("assertion failed: "+format, args...)) + } + c.addErrf(0, pos, format, args...) + } +} + +func init() { + log.SetFlags(log.Lshortfile) +} + +func Logf(format string, args ...interface{}) { + if Verbosity == 0 { + return + } + s := fmt.Sprintf(format, args...) + _ = log.Output(2, s) +} + +var pMap = map[*Vertex]int{} + +func (c *OpContext) Logf(v *Vertex, format string, args ...interface{}) { + if Verbosity == 0 { + return + } + if v == nil { + s := fmt.Sprintf(strings.Repeat("..", c.nest)+format, args...) + _ = log.Output(2, s) + return + } + p := pMap[v] + if p == 0 { + p = len(pMap) + 1 + pMap[v] = p + } + a := append([]interface{}{ + strings.Repeat("..", c.nest), + p, + v.Label.SelectorString(c), + v.Path(), + }, args...) + for i := 2; i < len(a); i++ { + switch x := a[i].(type) { + case Node: + a[i] = c.Str(x) + case Feature: + a[i] = x.SelectorString(c) + } + } + s := fmt.Sprintf("%s [%d] %s/%v"+format, a...) + _ = log.Output(2, s) +} + +// Runtime defines an interface for low-level representation conversion and +// lookup. +type Runtime interface { + // StringIndexer allows for converting string labels to and from a + // canonical numeric representation. + StringIndexer + + // LoadImport loads a unique Vertex associated with a given import path. It + // returns nil if no import for this package could be found. + LoadImport(importPath string) *Vertex + + // StoreType associates a CUE expression with a Go type. 
+	StoreType(t reflect.Type, src ast.Expr, expr Expr)
+
+	// LoadType retrieves a previously stored CUE expression for a given Go
+	// type if available.
+	LoadType(t reflect.Type) (src ast.Expr, expr Expr, ok bool)
+}
+
+// Config configures the OpContext created by New.
+type Config struct {
+	Runtime
+	Format func(Node) string
+}
+
+// New creates an operation context.
+func New(v *Vertex, cfg *Config) *OpContext {
+	if cfg.Runtime == nil {
+		panic("nil Runtime")
+	}
+	ctx := &OpContext{
+		Runtime: cfg.Runtime,
+		Format:  cfg.Format,
+		vertex:  v,
+	}
+	if v != nil {
+		ctx.e = &Environment{Up: nil, Vertex: v}
+	}
+	return ctx
+}
+
+// An OpContext implements CUE's unification operation. Its operations only
+// operate on values that are created with the Runtime with which an OpContext
+// is associated. An OpContext is not goroutine safe and only one goroutine may
+// use an OpContext at a time.
+//
+type OpContext struct {
+	Runtime
+	Format func(Node) string
+
+	nest int
+
+	stats        Stats
+	freeListNode *nodeContext
+
+	e         *Environment
+	src       ast.Node
+	errs      *Bottom
+	positions []Node // keep track of error positions
+
+	// vertex is used to determine the path location in case of error. Turning
+	// this into a stack could also allow determining the cyclic path for
+	// structural cycle errors.
+	vertex *Vertex
+
+	nonMonotonicLookupNest int32
+	nonMonotonicRejectNest int32
+	nonMonotonicInsertNest int32
+	nonMonotonicGeneration int32
+
+	// These fields are used to associate scratch fields for computing closedness
+	// of a Vertex. These fields could have been included in StructInfo (like
+	// Tomabechi's unification algorithm), but we opted for an indirection to
+	// allow concurrent unification.
+	//
+	// TODO(perf): have two generations: one for each pass of the closedness
+	// algorithm, so that the results of the first pass can be reused for all
+	// features of a node.
+	generation int
+	closed     map[*closeInfo]*closeStats
+	todo       *closeStats
+
+	// inDisjunct indicates that non-monotonic checks should be skipped.
+	// This is used if we want to do some extra work to eliminate disjunctions
+	// early. The result of unification should be thrown away if this check is
+	// used.
+	//
+	// TODO: replace this with a mechanism to determine the correct set (per
+	// conjunct) of StructInfos to include in closedness checking.
+	inDisjunct int
+
+	// inConstraint overrides inDisjunct as field matching should always be
+	// enabled.
+	inConstraint int
+}
+
+// skipNonMonotonicChecks reports whether non-monotonic checks should be
+// skipped: true while expanding a disjunction, unless constraint matching
+// (inConstraint) is active.
+func (n *nodeContext) skipNonMonotonicChecks() bool {
+	if n.ctx.inConstraint > 0 {
+		return false
+	}
+	return n.ctx.inDisjunct > 0
+}
+
+// Impl is for internal use only. This will go.
+func (c *OpContext) Impl() Runtime {
+	return c.Runtime
+}
+
+// Pos reports the position of the source node currently being evaluated, or
+// token.NoPos if there is none.
+func (c *OpContext) Pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+// Source returns the source node currently being evaluated, if any.
+func (c *OpContext) Source() ast.Node {
+	return c.src
+}
+
+// NewContext creates an operation context.
+func NewContext(r Runtime, v *Vertex) *OpContext {
+	return New(v, &Config{Runtime: r})
+}
+
+// pos is the unexported counterpart of Pos: the position of the current
+// source node, or token.NoPos.
+func (c *OpContext) pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+// spawn returns a new child Environment for node, inheriting the cycle
+// bookkeeping of the current environment.
+func (c *OpContext) spawn(node *Vertex) *Environment {
+	node.Parent = c.e.Vertex // TODO: Is this necessary?
+	return &Environment{
+		Up:     c.e,
+		Vertex: node,
+
+		// Copy cycle data.
+		Cyclic: c.e.Cyclic,
+		Deref:  c.e.Deref,
+		Cycles: c.e.Cycles,
+	}
+}
+
+// Env returns the Environment upCount levels up from the current one.
+func (c *OpContext) Env(upCount int32) *Environment {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e
+}
+
+// relNode returns the Vertex upCount levels up from the current environment,
+// ensuring it has been unified to at least Partial status.
+func (c *OpContext) relNode(upCount int32) *Vertex {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	c.Unify(e.Vertex, Partial)
+	return e.Vertex
+}
+
+// relLabel returns the dynamic label of the environment upCount levels up.
+func (c *OpContext) relLabel(upCount int32) Feature {
+	// locate current label.
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e.DynamicLabel
+}
+
+// concreteIsPossible reports whether x can evaluate to a concrete value for
+// op, recording an error on the context if it cannot.
+func (c *OpContext) concreteIsPossible(op Op, x Expr) bool {
+	if !AssertConcreteIsPossible(op, x) {
+		// No need to take position of expression.
+ c.AddErr(c.NewPosf(token.NoPos, + "invalid operand %s ('%s' requires concrete value)", x, op)) + return false + } + return true +} + +// Assert that the given expression can evaluate to a concrete value. +func AssertConcreteIsPossible(op Op, x Expr) bool { + switch v := x.(type) { + case *Bottom: + case *BoundExpr: + return false + case Value: + return v.Concreteness() == Concrete + } + return true +} + +// HasErr reports whether any error was reported, including whether value +// was incomplete. +func (c *OpContext) HasErr() bool { + return c.errs != nil +} + +func (c *OpContext) Err() *Bottom { + b := c.errs + c.errs = nil + return b +} + +func (c *OpContext) addErrf(code ErrorCode, pos token.Pos, msg string, args ...interface{}) { + err := c.NewPosf(pos, msg, args...) + c.addErr(code, err) +} + +func (c *OpContext) addErr(code ErrorCode, err errors.Error) { + c.AddBottom(&Bottom{Code: code, Err: err}) +} + +// AddBottom records an error in OpContext. +func (c *OpContext) AddBottom(b *Bottom) { + c.errs = CombineErrors(c.src, c.errs, b) +} + +// AddErr records an error in OpContext. It returns errors collected so far. +func (c *OpContext) AddErr(err errors.Error) *Bottom { + if err != nil { + c.AddBottom(&Bottom{Err: err}) + } + return c.errs +} + +// NewErrf creates a *Bottom value and returns it. The returned uses the +// current source as the point of origin of the error. +func (c *OpContext) NewErrf(format string, args ...interface{}) *Bottom { + // TODO: consider renaming ot NewBottomf: this is now confusing as we also + // have Newf. + err := c.Newf(format, args...) + return &Bottom{Src: c.src, Err: err, Code: EvalError} +} + +// AddErrf records an error in OpContext. It returns errors collected so far. 
+func (c *OpContext) AddErrf(format string, args ...interface{}) *Bottom {
+	return c.AddErr(c.Newf(format, args...))
+}
+
+// frame is the evaluation state saved by PushState and restored by PopState:
+// the active environment, accumulated errors, and the current source node.
+type frame struct {
+	env *Environment
+	err *Bottom
+	src ast.Node
+}
+
+// PushState installs env (and, if non-nil, src) as the current evaluation
+// state, clears the error accumulator, and returns the previous state so it
+// can be restored with PopState.
+func (c *OpContext) PushState(env *Environment, src ast.Node) (saved frame) {
+	saved.env = c.e
+	saved.err = c.errs
+	saved.src = c.src
+
+	c.errs = nil
+	if src != nil {
+		c.src = src
+	}
+	c.e = env
+
+	return saved
+}
+
+// PopState restores the state saved by the matching PushState and returns the
+// errors accumulated since that PushState, if any.
+func (c *OpContext) PopState(s frame) *Bottom {
+	err := c.errs
+	c.e = s.env
+	c.errs = s.err
+	c.src = s.src
+	return err
+}
+
+// PushArc signals c that arc v is currently being processed for the purpose
+// of error reporting. PopArc should be called with the returned value once
+// processing of v is completed.
+func (c *OpContext) PushArc(v *Vertex) (saved *Vertex) {
+	c.vertex, saved = v, c.vertex
+	return saved
+}
+
+// PopArc signals completion of processing the current arc.
+func (c *OpContext) PopArc(saved *Vertex) {
+	c.vertex = saved
+}
+
+// Resolve finds a node in the tree.
+//
+// Should only be used to insert Conjuncts. TODO: perhaps only return Conjuncts
+// and error.
+func (c *OpContext) Resolve(env *Environment, r Resolver) (*Vertex, *Bottom) {
+	s := c.PushState(env, r.Source())
+
+	arc := r.resolve(c, Partial)
+
+	err := c.PopState(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// Structural cycles in children are fatal; surface them directly.
+	if arc.ChildErrors != nil && arc.ChildErrors.Code == StructuralCycleError {
+		return nil, arc.ChildErrors
+	}
+
+	arc = arc.Indirect()
+
+	return arc, err
+}
+
+// Validate validates value for the given validator.
+//
+// TODO(errors): return boolean instead: only the caller has enough information
+// to generate a proper error message.
+func (c *OpContext) Validate(check Validator, value Value) *Bottom {
+	// TODO: use a position stack to push both values.
+	saved := c.src
+	c.src = check.Source()
+
+	err := check.validate(c, value)
+
+	c.src = saved
+
+	return err
+}
+
+// Yield evaluates a Yielder and calls f for each result.
+func (c *OpContext) Yield(env *Environment, comp *Comprehension, f YieldFunc) *Bottom {
+	y := comp.Clauses
+
+	s := c.PushState(env, y.Source())
+
+	y.yield(c, f)
+
+	return c.PopState(s)
+
+}
+
+// Concrete returns the concrete value of x after evaluating it.
+// msg is used to mention the context in which an error occurred, if any.
+func (c *OpContext) Concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) {
+
+	w, complete := c.Evaluate(env, x)
+
+	// Resolve any disjunction to its single default before checking
+	// concreteness.
+	w, ok := c.getDefault(w)
+	if !ok {
+		return w, false
+	}
+	v := Unwrap(w)
+
+	if !IsConcrete(v) {
+		complete = false
+		b := c.NewErrf("non-concrete value %v in operand to %s", w, msg)
+		b.Code = IncompleteError
+		v = b
+	}
+
+	if !complete {
+		return v, complete
+	}
+
+	return v, true
+}
+
+// getDefault resolves a disjunction to a single value. If there is no default
+// value, or if there is more than one default value, it reports an "incomplete"
+// error and returns false. In all other cases it will return true, even if
+// v is already an error. v may be nil, in which case it will also return nil.
+func (c *OpContext) getDefault(v Value) (result Value, ok bool) {
+	var d *Disjunction
+	switch x := v.(type) {
+	default:
+		return v, true
+
+	case *Vertex:
+		// TODO: return vertex if not disjunction.
+		switch t := x.BaseValue.(type) {
+		case *Disjunction:
+			d = t
+
+		case *Vertex:
+			// Follow the indirection recursively.
+			return c.getDefault(t)
+
+		default:
+			return x, true
+		}
+
+	case *Disjunction:
+		d = x
+	}
+
+	if d.NumDefaults != 1 {
+		c.addErrf(IncompleteError, c.pos(),
+			"unresolved disjunction %s (type %s)", d, d.Kind())
+		return nil, false
+	}
+	// The single default may itself be a disjunction; resolve recursively.
+	return c.getDefault(d.Values[0])
+}
+
+// Evaluate evaluates an expression within the given environment and indicates
+// whether the result is complete. It will always return a non-nil result.
+func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) { + s := c.PushState(env, x.Source()) + + val := c.evalState(x, Partial) + + complete = true + + if err, _ := val.(*Bottom); err != nil && err.IsIncomplete() { + complete = false + } + if val == nil { + complete = false + // TODO ENSURE THIS DOESN"T HAPPEN> + val = &Bottom{ + Code: IncompleteError, + Err: c.Newf("UNANTICIPATED ERROR"), + } + + } + + _ = c.PopState(s) + + if !complete || val == nil { + return val, false + } + + return val, true +} + +func (c *OpContext) evaluateRec(env *Environment, x Expr, state VertexStatus) Value { + s := c.PushState(env, x.Source()) + + val := c.evalState(x, state) + if val == nil { + // Be defensive: this never happens, but just in case. + Assertf(false, "nil return value: unspecified error") + val = &Bottom{ + Code: IncompleteError, + Err: c.Newf("UNANTICIPATED ERROR"), + } + } + _ = c.PopState(s) + + return val +} + +// value evaluates expression v within the current environment. The result may +// be nil if the result is incomplete. value leaves errors untouched to that +// they can be collected by the caller. +func (c *OpContext) value(x Expr) (result Value) { + v := c.evalState(x, Partial) + + v, _ = c.getDefault(v) + v = Unwrap(v) + return v +} + +func (c *OpContext) evalState(v Expr, state VertexStatus) (result Value) { + savedSrc := c.src + c.src = v.Source() + err := c.errs + c.errs = nil + + defer func() { + c.errs = CombineErrors(c.src, c.errs, err) + + if v, ok := result.(*Vertex); ok { + if b, _ := v.BaseValue.(*Bottom); b != nil { + switch b.Code { + case IncompleteError: + case CycleError: + if state == Partial { + break + } + fallthrough + default: + result = b + } + } + } + + // TODO: remove this when we handle errors more principally. 
+ if b, ok := result.(*Bottom); ok { + if c.src != nil && + b.Code == CycleError && + len(errors.Positions(b.Err)) == 0 { + bb := *b + bb.Err = errors.Wrapf(b.Err, c.src.Pos(), "") + result = &bb + } + if c.errs != result { + c.errs = CombineErrors(c.src, c.errs, result) + } + } + if c.errs != nil { + result = c.errs + } + c.src = savedSrc + }() + + switch x := v.(type) { + case Value: + return x + + case Evaluator: + v := x.evaluate(c) + return v + + case Resolver: + arc := x.resolve(c, state) + if c.HasErr() { + return nil + } + if arc == nil { + return nil + } + + v := c.evaluate(arc, state) + return v + + default: + // This can only happen, really, if v == nil, which is not allowed. + panic(fmt.Sprintf("unexpected Expr type %T", v)) + } +} + +// unifyNode returns a possibly partially evaluated node value. +// +// TODO: maybe return *Vertex, *Bottom +// +func (c *OpContext) unifyNode(v Expr, state VertexStatus) (result Value) { + savedSrc := c.src + c.src = v.Source() + err := c.errs + c.errs = nil + + defer func() { + c.errs = CombineErrors(c.src, c.errs, err) + + if v, ok := result.(*Vertex); ok { + if b, _ := v.BaseValue.(*Bottom); b != nil { + switch b.Code { + case IncompleteError: + case CycleError: + if state == Partial { + break + } + fallthrough + default: + result = b + } + } + } + + // TODO: remove this when we handle errors more principally. 
+ if b, ok := result.(*Bottom); ok { + if c.src != nil && + b.Code == CycleError && + b.Err.Position() == token.NoPos && + len(b.Err.InputPositions()) == 0 { + bb := *b + bb.Err = errors.Wrapf(b.Err, c.src.Pos(), "") + result = &bb + } + c.errs = CombineErrors(c.src, c.errs, result) + } + if c.errs != nil { + result = c.errs + } + c.src = savedSrc + }() + + switch x := v.(type) { + case Value: + return x + + case Evaluator: + v := x.evaluate(c) + return v + + case Resolver: + v := x.resolve(c, state) + if c.HasErr() { + return nil + } + if v == nil { + return nil + } + + if v.isUndefined() || state > v.status { + // Keep a minimum state of AllArcs. + // TODO: AllArcs may still not be achieved if a node is currently + // evaluating. + state := state + if state < AllArcs { + state = AllArcs + } + // Use node itself to allow for cycle detection. + c.Unify(v, state) + } + + return v + + default: + // This can only happen, really, if v == nil, which is not allowed. + panic(fmt.Sprintf("unexpected Expr type %T", v)) + } +} + +func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state VertexStatus) *Vertex { + if l == InvalidLabel || x == nil { + // TODO: is it possible to have an invalid label here? Maybe through the + // API? 
+ return &Vertex{} + } + + // var kind Kind + // if x.BaseValue != nil { + // kind = x.BaseValue.Kind() + // } + + switch x.BaseValue.(type) { + case *StructMarker: + if l.Typ() == IntLabel { + c.addErrf(0, pos, "invalid struct selector %s (type int)", l) + return nil + } + + case *ListMarker: + switch { + case l.Typ() == IntLabel: + switch { + case l.Index() < 0: + c.addErrf(0, pos, "invalid list index %s (index must be non-negative)", l) + return nil + case l.Index() > len(x.Arcs): + c.addErrf(0, pos, "invalid list index %s (out of bounds)", l) + return nil + } + + case l.IsDef(), l.IsHidden(): + + default: + c.addErrf(0, pos, "invalid list index %s (type string)", l) + return nil + } + + case nil: + // c.addErrf(IncompleteError, pos, "incomplete value %s", x) + // return nil + + case *Bottom: + + default: + kind := x.BaseValue.Kind() + if kind&(ListKind|StructKind) != 0 { + // c.addErrf(IncompleteError, pos, + // "cannot look up %s in incomplete type %s (type %s)", + // l, x.Source(), kind) + // return nil + } else if !l.IsDef() && !l.IsHidden() { + c.addErrf(0, pos, + "invalid selector %s for value of type %s", l, kind) + return nil + } + } + + a := x.Lookup(l) + if a != nil { + a = a.Indirect() + } + + var hasCycle bool +outer: + switch { + case c.nonMonotonicLookupNest == 0 && c.nonMonotonicRejectNest == 0: + case a != nil: + if state == Partial { + a.nonMonotonicLookupGen = c.nonMonotonicGeneration + } + + case x.state != nil && state == Partial: + for _, e := range x.state.exprs { + if isCyclePlaceholder(e.err) { + hasCycle = true + } + } + for _, a := range x.state.usedArcs { + if a.Label == l { + a.nonMonotonicLookupGen = c.nonMonotonicGeneration + if c.nonMonotonicRejectNest > 0 { + a.nonMonotonicReject = true + } + break outer + } + } + a := &Vertex{Label: l, nonMonotonicLookupGen: c.nonMonotonicGeneration} + if c.nonMonotonicRejectNest > 0 { + a.nonMonotonicReject = true + } + x.state.usedArcs = append(x.state.usedArcs, a) + } + + if a != nil && state 
> a.status { + c.Unify(a, state) + } + + if a == nil { + if x.state != nil { + for _, e := range x.state.exprs { + if isCyclePlaceholder(e.err) { + hasCycle = true + } + } + } + code := IncompleteError + if !x.Accept(c, l) { + code = 0 + } else if hasCycle { + code = CycleError + } + // TODO: if the struct was a literal struct, we can also treat it as + // closed and make this a permanent error. + label := l.SelectorString(c.Runtime) + + // TODO(errors): add path reference and make message + // "undefined field %s in %s" + if l.IsInt() { + c.addErrf(code, pos, "index out of range [%d] with length %d", + l.Index(), len(x.Elems())) + } else { + if code != 0 && x.IsOptional(l) { + c.addErrf(code, pos, + "cannot reference optional field: %s", label) + } else { + c.addErrf(code, pos, "undefined field: %s", label) + } + } + } + return a +} + +func (c *OpContext) Label(src Expr, x Value) Feature { + return labelFromValue(c, src, x) +} + +func (c *OpContext) typeError(v Value, k Kind) { + if isError(v) { + return + } + if !IsConcrete(v) && v.Kind()&k != 0 { + c.addErrf(IncompleteError, pos(v), "incomplete %s: %s", k, v) + } else { + c.AddErrf("cannot use %s (type %s) as type %s", v, v.Kind(), k) + } +} + +func (c *OpContext) typeErrorAs(v Value, k Kind, as interface{}) { + if as == nil { + c.typeError(v, k) + return + } + if isError(v) { + return + } + if !IsConcrete(v) && v.Kind()&k != 0 { + c.addErrf(IncompleteError, pos(v), + "incomplete %s in %v: %s", k, as, v) + } else { + c.AddErrf("cannot use %s (type %s) as type %s in %v", v, v.Kind(), k, as) + } +} + +var emptyNode = &Vertex{} + +func pos(x Node) token.Pos { + if x.Source() == nil { + return token.NoPos + } + return x.Source().Pos() +} + +func (c *OpContext) node(orig Node, x Expr, scalar bool, state VertexStatus) *Vertex { + // TODO: always get the vertex. This allows a whole bunch of trickery + // down the line. 
+ v := c.unifyNode(x, state) + + v, ok := c.getDefault(v) + if !ok { + // Error already generated by getDefault. + return emptyNode + } + + // The two if blocks below are rather subtle. If we have an error of + // the sentinel value cycle, we have earlier determined that the cycle is + // allowed and that it can be ignored here. Any other CycleError is an + // annotated cycle error that could be taken as is. + // TODO: do something simpler. + if scalar { + if w := Unwrap(v); !isCyclePlaceholder(w) { + v = w + } + } + + node, ok := v.(*Vertex) + if ok && !isCyclePlaceholder(node.BaseValue) { + v = node.Value() + } + + switch nv := v.(type) { + case nil: + switch orig.(type) { + case *ForClause: + c.addErrf(IncompleteError, pos(x), + "cannot range over %s (incomplete)", x) + default: + c.addErrf(IncompleteError, pos(x), + "%s undefined (%s is incomplete)", orig, x) + } + return emptyNode + + case *Bottom: + // TODO: this is a bit messy. In some cases errors are already added + // and in some cases not. Not a huge deal, as errors will be uniqued + // down the line, but could be better. + c.AddBottom(nv) + return emptyNode + + case *Vertex: + if node == nil { + panic("unexpected markers with nil node") + } + + default: + if kind := v.Kind(); kind&StructKind != 0 { + switch orig.(type) { + case *ForClause: + c.addErrf(IncompleteError, pos(x), + "cannot range over %s (incomplete type %s)", x, kind) + default: + c.addErrf(IncompleteError, pos(x), + "%s undefined as %s is incomplete (type %s)", orig, x, kind) + } + return emptyNode + + } else if !ok { + c.addErrf(0, pos(x), // TODO(error): better message. + "invalid operand %s (found %s, want list or struct)", + x.Source(), v.Kind()) + return emptyNode + } + } + + return node +} + +// Elems returns the elements of a list. 
+func (c *OpContext) Elems(v Value) []*Vertex {
+	list := c.list(v)
+	return list.Elems()
+}
+
+// list asserts that v is a list Vertex, recording a type error and returning
+// emptyNode if it is not.
+func (c *OpContext) list(v Value) *Vertex {
+	x, ok := v.(*Vertex)
+	if !ok || !x.IsList() {
+		c.typeError(v, ListKind)
+		return emptyNode
+	}
+	return x
+}
+
+// scalar unwraps v and records a type error if the result is not one of the
+// scalar types. The (possibly erroneous) value is returned either way.
+func (c *OpContext) scalar(v Value) Value {
+	v = Unwrap(v)
+	switch v.(type) {
+	case *Null, *Bool, *Num, *String, *Bytes:
+	default:
+		c.typeError(v, ScalarKinds)
+	}
+	return v
+}
+
+// zero is the fallback *Num returned by numeric accessors on error.
+var zero = &Num{K: NumKind}
+
+// Num returns v as a *Num, recording a type error (mentioning as) and
+// returning zero if v is not a number.
+func (c *OpContext) Num(v Value, as interface{}) *Num {
+	v = Unwrap(v)
+	if isError(v) {
+		return zero
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, NumKind, as)
+		return zero
+	}
+	return x
+}
+
+// Int64 returns v as an int64, recording an error and returning 0 if v is not
+// a number or does not fit in an int64.
+func (c *OpContext) Int64(v Value) int64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeError(v, IntKind)
+		return 0
+	}
+	i, err := x.X.Int64()
+	if err != nil {
+		c.AddErrf("number is not an int64: %v", err)
+		return 0
+	}
+	return i
+}
+
+// uint64 returns v as a uint64, recording an error (mentioning as) and
+// returning 0 if v is negative, not a number, or does not fit in a uint64.
+func (c *OpContext) uint64(v Value, as string) uint64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, IntKind, as)
+		return 0
+	}
+	if x.X.Negative {
+		// TODO: improve message
+		c.AddErrf("cannot convert negative number to uint64")
+		return 0
+	}
+	if !x.X.Coeff.IsUint64() {
+		// TODO: improve message
+		c.AddErrf("cannot convert number %s to uint64", x.X)
+		return 0
+	}
+	return x.X.Coeff.Uint64()
+}
+
+// BoolValue returns v as a bool, recording a type error and returning false
+// if v is not a boolean.
+func (c *OpContext) BoolValue(v Value) bool {
+	return c.boolValue(v, nil)
+}
+
+// boolValue is like BoolValue but mentions as in any type error.
+func (c *OpContext) boolValue(v Value, as interface{}) bool {
+	v = Unwrap(v)
+	if isError(v) {
+		return false
+	}
+	x, ok := v.(*Bool)
+	if !ok {
+		c.typeErrorAs(v, BoolKind, as)
+		return false
+	}
+	return x.B
+}
+
+// StringValue returns v as a string, recording a type error and returning ""
+// if v is not a string.
+func (c *OpContext) StringValue(v Value) string {
+	return c.stringValue(v, nil)
+}
+
+// ToBytes returns the bytes value of a scalar value.
+func (c *OpContext) ToBytes(v Value) []byte { + if x, ok := v.(*Bytes); ok { + return x.B + } + return []byte(c.ToString(v)) +} + +// ToString returns the string value of a scalar value. +func (c *OpContext) ToString(v Value) string { + return c.toStringValue(v, StringKind|NumKind|BytesKind|BoolKind, nil) + +} + +func (c *OpContext) stringValue(v Value, as interface{}) string { + return c.toStringValue(v, StringKind, as) +} + +func (c *OpContext) toStringValue(v Value, k Kind, as interface{}) string { + v = Unwrap(v) + if isError(v) { + return "" + } + if v.Kind()&k == 0 { + if as == nil { + c.typeError(v, k) + } else { + c.typeErrorAs(v, k, as) + } + return "" + } + switch x := v.(type) { + case *String: + return x.Str + + case *Bytes: + return bytesToString(x.B) + + case *Num: + return x.X.String() + + case *Bool: + if x.B { + return "true" + } + return "false" + + default: + c.addErrf(IncompleteError, c.pos(), + "non-concrete value %s (type %s)", v, v.Kind()) + } + return "" +} + +func bytesToString(b []byte) string { + b, _ = unicode.UTF8.NewDecoder().Bytes(b) + return string(b) +} + +func (c *OpContext) bytesValue(v Value, as interface{}) []byte { + v = Unwrap(v) + if isError(v) { + return nil + } + x, ok := v.(*Bytes) + if !ok { + c.typeErrorAs(v, BytesKind, as) + return nil + } + return x.B +} + +var matchNone = regexp.MustCompile("^$") + +func (c *OpContext) regexp(v Value) *regexp.Regexp { + v = Unwrap(v) + if isError(v) { + return matchNone + } + switch x := v.(type) { + case *String: + if x.RE != nil { + return x.RE + } + // TODO: synchronization + p, err := regexp.Compile(x.Str) + if err != nil { + // FatalError? 
How to cache error + c.AddErrf("invalid regexp: %s", err) + x.RE = matchNone + } else { + x.RE = p + } + return x.RE + + case *Bytes: + if x.RE != nil { + return x.RE + } + // TODO: synchronization + p, err := regexp.Compile(string(x.B)) + if err != nil { + c.AddErrf("invalid regexp: %s", err) + x.RE = matchNone + } else { + x.RE = p + } + return x.RE + + default: + c.typeError(v, StringKind|BytesKind) + return matchNone + } +} + +// newNum creates a new number of the given kind. It reports an error value +// instead if any error occurred. +func (c *OpContext) newNum(d *apd.Decimal, k Kind, sources ...Node) Value { + if c.HasErr() { + return c.Err() + } + return &Num{Src: c.src, X: *d, K: k} +} + +func (c *OpContext) NewInt64(n int64, sources ...Node) Value { + if c.HasErr() { + return c.Err() + } + d := apd.New(n, 0) + return &Num{Src: c.src, X: *d, K: IntKind} +} + +func (c *OpContext) NewString(s string) Value { + if c.HasErr() { + return c.Err() + } + return &String{Src: c.src, Str: s} +} + +func (c *OpContext) newBytes(b []byte) Value { + if c.HasErr() { + return c.Err() + } + return &Bytes{Src: c.src, B: b} +} + +func (c *OpContext) newBool(b bool) Value { + if c.HasErr() { + return c.Err() + } + return &Bool{Src: c.src, B: b} +} + +func (c *OpContext) newList(src ast.Node, parent *Vertex) *Vertex { + return &Vertex{Parent: parent, BaseValue: &ListMarker{}} +} + +// Str reports a debug string of x. +func (c *OpContext) Str(x Node) string { + if c.Format == nil { + return fmt.Sprintf("%T", x) + } + return c.Format(x) +} + +// NewList returns a new list for the given values. +func (c *OpContext) NewList(values ...Value) *Vertex { + // TODO: consider making this a literal list instead. 
+ list := &ListLit{} + v := &Vertex{ + Conjuncts: []Conjunct{{Env: nil, x: list}}, + } + + for _, x := range values { + list.Elems = append(list.Elems, x) + } + c.Unify(v, Finalized) + return v +} diff --git a/vendor/cuelang.org/go/internal/core/adt/decimal.go b/vendor/cuelang.org/go/internal/core/adt/decimal.go new file mode 100644 index 0000000000..e7eba38566 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/decimal.go @@ -0,0 +1,131 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adt + +import ( + "math/big" + + "github.com/cockroachdb/apd/v2" +) + +var apdCtx apd.Context + +func init() { + apdCtx = apd.BaseContext + apdCtx.Precision = 24 +} + +func (n *Num) Impl() *apd.Decimal { + return &n.X +} + +func (n *Num) Negative() bool { + return n.X.Negative +} + +func (a *Num) Cmp(b *Num) int { + return a.X.Cmp(&b.X) +} + +func (c *OpContext) Add(a, b *Num) Value { + return numOp(c, apdCtx.Add, a, b) +} + +func (c *OpContext) Sub(a, b *Num) Value { + return numOp(c, apdCtx.Sub, a, b) +} + +func (c *OpContext) Mul(a, b *Num) Value { + return numOp(c, apdCtx.Mul, a, b) +} + +func (c *OpContext) Quo(a, b *Num) Value { + v := numOp(c, apdCtx.Quo, a, b) + if n, ok := v.(*Num); ok { + n.K = FloatKind + } + return v +} + +func (c *OpContext) Pow(a, b *Num) Value { + return numOp(c, apdCtx.Pow, a, b) +} + +type numFunc func(z, x, y *apd.Decimal) (apd.Condition, error) + +func numOp(c *OpContext, fn numFunc, x, y *Num) Value { + var d apd.Decimal + + cond, err := fn(&d, &x.X, &y.X) + + if err != nil { + return c.NewErrf("failed arithmetic: %v", err) + } + + if cond.DivisionByZero() { + return c.NewErrf("division by zero") + } + + k := x.Kind() & y.Kind() + if k == 0 { + k = FloatKind + } + return c.newNum(&d, k) +} + +func (c *OpContext) IntDiv(a, b *Num) Value { + return intDivOp(c, (*big.Int).Div, a, b) +} + +func (c *OpContext) IntMod(a, b *Num) Value { + return intDivOp(c, (*big.Int).Mod, a, b) +} + +func (c *OpContext) IntQuo(a, b *Num) Value { + return intDivOp(c, (*big.Int).Quo, a, b) +} + +func (c *OpContext) IntRem(a, b *Num) Value { + return intDivOp(c, (*big.Int).Rem, a, b) +} + +type intFunc func(z, x, y *big.Int) *big.Int + +func intDivOp(c *OpContext, fn intFunc, a, b *Num) Value { + if b.X.IsZero() { + return c.NewErrf("division by zero") + } + + var x, y apd.Decimal + _, _ = apdCtx.RoundToIntegralValue(&x, &a.X) + if x.Negative { + x.Coeff.Neg(&x.Coeff) + } + _, _ = apdCtx.RoundToIntegralValue(&y, &b.X) + if y.Negative { + 
y.Coeff.Neg(&y.Coeff) + } + + var d apd.Decimal + + fn(&d.Coeff, &x.Coeff, &y.Coeff) + + if d.Coeff.Sign() < 0 { + d.Coeff.Neg(&d.Coeff) + d.Negative = true + } + + return c.newNum(&d, IntKind) +} diff --git a/vendor/cuelang.org/go/internal/core/adt/default.go b/vendor/cuelang.org/go/internal/core/adt/default.go new file mode 100644 index 0000000000..6e6f1b19f4 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/default.go @@ -0,0 +1,132 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// Default returns the default value or itself if there is no default. +func Default(v Value) Value { + switch x := v.(type) { + case *Vertex: + return x.Default() + case *Disjunction: + return x.Default() + default: + return v + } +} + +func (d *Disjunction) Default() Value { + switch d.NumDefaults { + case 0: + return d + case 1: + return d.Values[0] + default: + return &Disjunction{ + Src: d.Src, + Values: d.Values[:d.NumDefaults], + NumDefaults: 0, + } + } +} + +// Default returns the default value or itself if there is no default. +// +// It also closes a list, representing its default value. 
+func (v *Vertex) Default() *Vertex { + switch d := v.BaseValue.(type) { + default: + return v + + case *Disjunction: + var w *Vertex + + switch d.NumDefaults { + case 0: + return v + case 1: + w = d.Values[0].Default() + default: + x := *v + x.state = nil + x.BaseValue = &Disjunction{ + Src: d.Src, + Values: d.Values[:d.NumDefaults], + NumDefaults: 0, + } + w = &x + w.Conjuncts = nil + } + + if w.Conjuncts == nil { + for _, c := range v.Conjuncts { + // TODO: preserve field information. + expr, _ := stripNonDefaults(c.Elem()) + w.Conjuncts = append(w.Conjuncts, MakeRootConjunct(c.Env, expr)) + } + } + return w + + case *ListMarker: + m := *d + m.IsOpen = false + + w := *v + w.BaseValue = &m + w.state = nil + return &w + } +} + +// TODO: this should go: record preexpanded disjunctions in Vertex. +func stripNonDefaults(elem Elem) (r Elem, stripped bool) { + expr, ok := elem.(Expr) + if !ok { + return elem, false + } + switch x := expr.(type) { + case *DisjunctionExpr: + if !x.HasDefaults { + return x, false + } + d := *x + d.Values = []Disjunct{} + for _, v := range x.Values { + if v.Default { + d.Values = append(d.Values, v) + } + } + if len(d.Values) == 1 { + return d.Values[0].Val, true + } + return &d, true + + case *BinaryExpr: + if x.Op != AndOp { + return x, false + } + a, sa := stripNonDefaults(x.X) + b, sb := stripNonDefaults(x.Y) + if sa || sb { + bin := *x + bin.X = a.(Expr) + bin.Y = b.(Expr) + return &bin, true + } + return x, false + + default: + return x, false + } +} diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct.go b/vendor/cuelang.org/go/internal/core/adt/disjunct.go new file mode 100644 index 0000000000..8f2074a40a --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/disjunct.go @@ -0,0 +1,588 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// Nodes man not reenter a disjunction. +// +// Copy one layer deep; throw away items on failure. + +// DISJUNCTION ALGORITHM +// +// The basic concept of the algorithm is to use backtracking to find valid +// disjunctions. The algorithm can stop if two matching disjuncts are found +// where one does not subsume the other. +// +// At a later point, we can introduce a filter step to filter out possible +// disjuncts based on, say, discriminator fields or field exclusivity (oneOf +// fields in Protobuf). +// +// To understand the details of the algorithm, it is important to understand +// some properties of disjunction. +// +// +// EVALUATION OF A DISJUNCTION IS SELF CONTAINED +// +// In other words, fields outside of a disjunction cannot bind to values within +// a disjunction whilst evaluating that disjunction. This allows the computation +// of disjunctions to be isolated from side effects. +// +// The intuition behind this is as follows: as a disjunction is not a concrete +// value, it is not possible to lookup a field within a disjunction if it has +// not yet been evaluated. So if a reference within a disjunction that is needed +// to disambiguate that disjunction refers to a field outside the scope of the +// disjunction which, in turn, refers to a field within the disjunction, this +// results in a cycle error. We achieve this by not removing the cycle marker of +// the Vertex of the disjunction until the disjunction is resolved. 
+// +// Note that the following disjunct is still allowed: +// +// a: 1 +// b: a +// +// Even though `a` refers to the root of the disjunction, it does not _select +// into_ the disjunction. Implementation-wise, it also doesn't have to, as the +// respective vertex is available within the Environment. Referencing a node +// outside the disjunction that in turn selects the disjunction root, however, +// will result in a detected cycle. +// +// As usual, cycle detection should be interpreted marked as incomplete, so that +// the referring node will not be fixed to an error prematurely. +// +// +// SUBSUMPTION OF AMBIGUOUS DISJUNCTS +// +// A disjunction can be evaluated to a concrete value if only one disjunct +// remains. Aside from disambiguating through unification failure, disjuncts +// may also be disambiguated by taking the least specific of two disjuncts. +// For instance, if a subsumes b, then the result of disjunction may be a. +// +// NEW ALGORITHM NO LONGER VERIFIES SUBSUMPTION. SUBSUMPTION IS INHERENTLY +// IMPRECISE (DUE TO BULK OPTIONAL FIELDS). OTHER THAN THAT, FOR SCALAR VALUES +// IT JUST MEANS THERE IS AMBIGUITY, AND FOR STRUCTS IT CAN LEAD TO STRANGE +// CONSEQUENCES. +// +// USE EQUALITY INSTEAD: +// - Undefined == error for optional fields. +// - So only need to check exact labels for vertices. + +type envDisjunct struct { + env *Environment + cloneID CloseInfo + expr *DisjunctionExpr + value *Disjunction + hasDefaults bool + + // These are used for book keeping, tracking whether any of the + // disjuncts marked with a default marker remains after unification. + // If no default is used, all other elements are treated as "maybeDefault". + // Otherwise, elements are treated as is. 
+ parentDefaultUsed bool + childDefaultUsed bool +} + +func (n *nodeContext) addDisjunction(env *Environment, x *DisjunctionExpr, cloneID CloseInfo) { + + // TODO: precompute + numDefaults := 0 + for _, v := range x.Values { + isDef := v.Default // || n.hasDefaults(env, v.Val) + if isDef { + numDefaults++ + } + } + + n.disjunctions = append(n.disjunctions, + envDisjunct{env, cloneID, x, nil, numDefaults > 0, false, false}) +} + +func (n *nodeContext) addDisjunctionValue(env *Environment, x *Disjunction, cloneID CloseInfo) { + n.disjunctions = append(n.disjunctions, + envDisjunct{env, cloneID, nil, x, x.HasDefaults, false, false}) + +} + +func (n *nodeContext) expandDisjuncts( + state VertexStatus, + parent *nodeContext, + parentMode defaultMode, // default mode of this disjunct + recursive, last bool) { + + n.ctx.stats.DisjunctCount++ + + node := n.node + defer func() { + n.node = node + }() + + for n.expandOne() { + } + + // save node to snapShot in nodeContex + // save nodeContext. + + if recursive || len(n.disjunctions) > 0 { + n.snapshot = clone(*n.node) + } else { + n.snapshot = *n.node + } + + defaultOffset := len(n.usedDefault) + + switch { + default: // len(n.disjunctions) == 0 + m := *n + n.postDisjunct(state) + + switch { + case n.hasErr(): + // TODO: consider finalizing the node thusly: + // if recursive { + // n.node.Finalize(n.ctx) + // } + x := n.node + err, ok := x.BaseValue.(*Bottom) + if !ok { + err = n.getErr() + } + if err == nil { + // TODO(disjuncts): Is this always correct? Especially for partial + // evaluation it is okay for child errors to have incomplete errors. + // Perhaps introduce an Err() method. + err = x.ChildErrors + } + if err.IsIncomplete() { + break + } + if err != nil { + parent.disjunctErrs = append(parent.disjunctErrs, err) + } + if recursive { + n.free() + } + return + } + + if recursive { + *n = m + n.result = *n.node // XXX: n.result = snapshotVertex(n.node)? 
+ n.node = &n.result + n.disjuncts = append(n.disjuncts, n) + } + if n.node.BaseValue == nil { + n.node.BaseValue = n.getValidators() + } + + n.usedDefault = append(n.usedDefault, defaultInfo{ + parentMode: parentMode, + nestedMode: parentMode, + origMode: parentMode, + }) + + case len(n.disjunctions) > 0: + // Process full disjuncts to ensure that erroneous disjuncts are + // eliminated as early as possible. + state = Finalized + + n.disjuncts = append(n.disjuncts, n) + + n.refCount++ + defer n.free() + + for i, d := range n.disjunctions { + a := n.disjuncts + n.disjuncts = n.buffer[:0] + n.buffer = a[:0] + + last := i+1 == len(n.disjunctions) + skipNonMonotonicChecks := i+1 < len(n.disjunctions) + if skipNonMonotonicChecks { + n.ctx.inDisjunct++ + } + + for _, dn := range a { + switch { + case d.expr != nil: + for _, v := range d.expr.Values { + cn := dn.clone() + *cn.node = clone(dn.snapshot) + cn.node.state = cn + + c := MakeConjunct(d.env, v.Val, d.cloneID) + cn.addExprConjunct(c) + + newMode := mode(d.hasDefaults, v.Default) + + cn.expandDisjuncts(state, n, newMode, true, last) + } + + case d.value != nil: + for i, v := range d.value.Values { + cn := dn.clone() + *cn.node = clone(dn.snapshot) + cn.node.state = cn + + cn.addValueConjunct(d.env, v, d.cloneID) + + newMode := mode(d.hasDefaults, i < d.value.NumDefaults) + + cn.expandDisjuncts(state, n, newMode, true, last) + } + } + } + + if skipNonMonotonicChecks { + n.ctx.inDisjunct-- + } + + if len(n.disjuncts) == 0 { + n.makeError() + } + + if recursive || i > 0 { + for _, x := range a { + x.free() + } + } + + if len(n.disjuncts) == 0 { + break + } + } + + // Annotate disjunctions with whether any of the default disjunctions + // was used. 
+ for _, d := range n.disjuncts { + for i, info := range d.usedDefault[defaultOffset:] { + if info.parentMode == isDefault { + n.disjunctions[i].parentDefaultUsed = true + } + if info.origMode == isDefault { + n.disjunctions[i].childDefaultUsed = true + } + } + } + + // Combine parent and child default markers, considering that a parent + // "notDefault" is treated as "maybeDefault" if none of the disjuncts + // marked as default remain. + // + // NOTE for a parent marked as "notDefault", a child is *never* + // considered as default. It may either be "not" or "maybe" default. + // + // The result for each disjunction is conjoined into a single value. + for _, d := range n.disjuncts { + m := maybeDefault + orig := maybeDefault + for i, info := range d.usedDefault[defaultOffset:] { + parent := info.parentMode + + used := n.disjunctions[i].parentDefaultUsed + childUsed := n.disjunctions[i].childDefaultUsed + hasDefaults := n.disjunctions[i].hasDefaults + + orig = combineDefault(orig, info.parentMode) + orig = combineDefault(orig, info.nestedMode) + + switch { + case childUsed: + // One of the children used a default. This is "normal" + // mode. This may also happen when we are in + // hasDefaults/notUsed mode. Consider + // + // ("a" | "b") & (*(*"a" | string) | string) + // + // Here the doubly nested default is called twice, once + // for "a" and then for "b", where the second resolves to + // not using a default. The first does, however, and on that + // basis the "ot default marker cannot be overridden. 
+ m = combineDefault(m, info.parentMode) + m = combineDefault(m, info.origMode) + + case !hasDefaults, used: + m = combineDefault(m, info.parentMode) + m = combineDefault(m, info.nestedMode) + + case hasDefaults && !used: + Assertf(parent == notDefault, "unexpected default mode") + } + } + d.defaultMode = m + + d.usedDefault = d.usedDefault[:defaultOffset] + d.usedDefault = append(d.usedDefault, defaultInfo{ + parentMode: parentMode, + nestedMode: m, + origMode: orig, + }) + + } + + // TODO: this is an old trick that seems no longer necessary for the new + // implementation. Keep around until we finalize the semantics for + // defaults, though. The recursion of nested defaults is not entirely + // proper yet. + // + // A better approach, that avoids the need for recursion (semantically), + // would be to only consider default usage for one level, but then to + // also allow a default to be passed if only one value is remaining. + // This means that a nested subsumption would first have to be evaluated + // in isolation, however, to determine that it is not previous + // disjunctions that cause the disambiguation. + // + // HACK alert: this replaces the hack of the previous algorithm with a + // slightly less worse hack: instead of dropping the default info when + // the value was scalar before, we drop this information when there is + // only one disjunct, while not discarding hard defaults. TODO: a more + // principled approach would be to recognize that there is only one + // default at a point where this does not break commutativity. if + // if len(n.disjuncts) == 1 && n.disjuncts[0].defaultMode != isDefault { + // n.disjuncts[0].defaultMode = maybeDefault + // } + } + + // Compare to root, but add to this one. + switch p := parent; { + case p != n: + p.disjunctErrs = append(p.disjunctErrs, n.disjunctErrs...) 
+ n.disjunctErrs = n.disjunctErrs[:0] + + outer: + for _, d := range n.disjuncts { + for k, v := range p.disjuncts { + if !d.done() || !v.done() { + break + } + flags := CheckStructural + if last { + flags |= IgnoreOptional + } + if Equal(n.ctx, &v.result, &d.result, flags) { + m := maybeDefault + for _, u := range d.usedDefault { + m = combineDefault(m, u.nestedMode) + } + if m == isDefault { + p.disjuncts[k] = d + v.free() + } else { + d.free() + } + continue outer + } + } + + p.disjuncts = append(p.disjuncts, d) + } + + n.disjuncts = n.disjuncts[:0] + } +} + +func (n *nodeContext) makeError() { + code := IncompleteError + + if len(n.disjunctErrs) > 0 { + code = EvalError + for _, c := range n.disjunctErrs { + if c.Code > code { + code = c.Code + } + } + } + + b := &Bottom{ + Code: code, + Err: n.disjunctError(), + } + n.node.SetValue(n.ctx, Finalized, b) +} + +func mode(hasDefault, marked bool) defaultMode { + var mode defaultMode + switch { + case !hasDefault: + mode = maybeDefault + case marked: + mode = isDefault + default: + mode = notDefault + } + return mode +} + +// clone makes a shallow copy of a Vertex. The purpose is to create different +// disjuncts from the same Vertex under computation. This allows the conjuncts +// of an arc to be reset to a previous position and the reuse of earlier +// computations. +// +// Notes: only Arcs need to be copied recursively. Either the arc is finalized +// and can be used as is, or Structs is assumed to not yet be computed at the +// time that a clone is needed and must be nil. Conjuncts no longer needed and +// can become nil. All other fields can be copied shallowly. 
+func clone(v Vertex) Vertex { + v.state = nil + if a := v.Arcs; len(a) > 0 { + v.Arcs = make([]*Vertex, len(a)) + for i, arc := range a { + switch arc.status { + case Finalized: + v.Arcs[i] = arc + + case 0: + a := *arc + v.Arcs[i] = &a + + a.Conjuncts = make([]Conjunct, len(arc.Conjuncts)) + copy(a.Conjuncts, arc.Conjuncts) + + default: + a := *arc + a.state = arc.state.clone() + a.state.node = &a + a.state.snapshot = clone(a) + v.Arcs[i] = &a + } + } + } + + if a := v.Structs; len(a) > 0 { + v.Structs = make([]*StructInfo, len(a)) + copy(v.Structs, a) + } + + return v +} + +// Default rules from spec: +// +// U1: (v1, d1) & v2 => (v1&v2, d1&v2) +// U2: (v1, d1) & (v2, d2) => (v1&v2, d1&d2) +// +// D1: (v1, d1) | v2 => (v1|v2, d1) +// D2: (v1, d1) | (v2, d2) => (v1|v2, d1|d2) +// +// M1: *v => (v, v) +// M2: *(v1, d1) => (v1, d1) +// +// NOTE: M2 cannot be *(v1, d1) => (v1, v1), as this has the weird property +// of making a value less specific. This causes issues, for instance, when +// trimming. +// +// The old implementation does something similar though. It will discard +// default information after first determining if more than one conjunct +// has survived. +// +// def + maybe -> def +// not + maybe -> def +// not + def -> def + +type defaultMode int + +const ( + maybeDefault defaultMode = iota + isDefault + notDefault +) + +// combineDefaults combines default modes for unifying conjuncts. +// +// Default rules from spec: +// +// U1: (v1, d1) & v2 => (v1&v2, d1&v2) +// U2: (v1, d1) & (v2, d2) => (v1&v2, d1&d2) +func combineDefault(a, b defaultMode) defaultMode { + if a > b { + return a + } + return b +} + +// disjunctError returns a compound error for a failed disjunction. +// +// TODO(perf): the set of errors is now computed during evaluation. Eventually, +// this could be done lazily. 
+func (n *nodeContext) disjunctError() (errs errors.Error) { + ctx := n.ctx + + disjuncts := selectErrors(n.disjunctErrs) + + if disjuncts == nil { + errs = ctx.Newf("empty disjunction") // XXX: add space to sort first + } else { + disjuncts = errors.Sanitize(disjuncts) + k := len(errors.Errors(disjuncts)) + // prefix '-' to sort to top + errs = ctx.Newf("%d errors in empty disjunction:", k) + } + + errs = errors.Append(errs, disjuncts) + + return errs +} + +func selectErrors(a []*Bottom) (errs errors.Error) { + // return all errors if less than a certain number. + if len(a) <= 2 { + for _, b := range a { + errs = errors.Append(errs, b.Err) + + } + return errs + } + + // First select only relevant errors. + isIncomplete := false + k := 0 + for _, b := range a { + if !isIncomplete && b.Code >= IncompleteError { + k = 0 + isIncomplete = true + } + a[k] = b + k++ + } + a = a[:k] + + // filter errors + positions := map[token.Pos]bool{} + + add := func(b *Bottom, p token.Pos) bool { + if positions[p] { + return false + } + positions[p] = true + errs = errors.Append(errs, b.Err) + return true + } + + for _, b := range a { + // TODO: Should we also distinguish by message type? + if add(b, b.Err.Position()) { + continue + } + for _, p := range b.Err.InputPositions() { + if add(b, p) { + break + } + } + } + + return errs +} diff --git a/vendor/cuelang.org/go/internal/core/adt/doc.go b/vendor/cuelang.org/go/internal/core/adt/doc.go new file mode 100644 index 0000000000..26c978e2f3 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/doc.go @@ -0,0 +1,78 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adt represents partially and fully evaluated CUE types. +// +// This package introduces several categories of types that indicate some set of +// values that may be used in a certain situation. Concrete types may belong to +// multiple categories. +// +// +// Abstract Types +// +// The following types describe the a place where a value may be used: +// +// Decl a value than can be used as a StructLit element. +// Elem a value than can be used as a ListLit element. +// Expr represents an Expr in the CUE grammar. +// Value a fully evaluated value that has no references (except for +// children in composite values). +// Node any of the above values. +// +// The following types categorize nodes by function: +// +// Resolver a reference to position in the result tree. +// Evaluator evaluates to 1 value. +// Yielder evaluates to 0 or more values. +// Validator validates another value. +// +// +// Reference resolution algorithm +// +// A Resolver is resolved within the context of an Environment. In CUE, a +// reference is evaluated by substituting it with a copy of the value to which +// it refers. If the copied value itself contains references we can distinguish +// two different cases. References that refer to values within the copied +// reference (not regarding selectors) will henceforth point to the copied node. +// References that point to outside the referened value will keep referring to +// their original value. 
+// +// a: b: { +// c: int +// d: c +// e: f +// } +// f: 4 +// g: a.b { // d.c points to inside the referred value, e.f, not. +// c: 3 +// } +// +// The implementation doesn't actually copy referred values, but rather resolves +// references with the aid of an Environment. During compile time, each +// references is associated with the label and a number indicating in which +// parent scope (offset from the current) this label needs to be looked up. An +// Environment keeps track of the point at which a value was referenced, +// providing enough information to look up the labeled value. This Environment +// is the identical for all references within a fields conjunct. Often, an +// Environment can even be shared among conjuncts. +// +// +// Values +// +// Values are fully evaluated expressions. As this means that all references +// will have been eliminated, Values are fully defined without the need for an +// Environment. Additionally, Values represent a fully evaluated form, stripped +// of any comprehensions, optional fields or embeddings. +// +package adt diff --git a/vendor/cuelang.org/go/internal/core/adt/equality.go b/vendor/cuelang.org/go/internal/core/adt/equality.go new file mode 100644 index 0000000000..cb1d338137 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/equality.go @@ -0,0 +1,192 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adt + +type Flag uint16 + +const ( + // IgnoreOptional allows optional information to be ignored. This only + // applies when CheckStructural is given. + IgnoreOptional Flag = 1 << iota + + // CheckStructural indicates that closedness information should be + // considered for equality. Equal may return false even when values are + // equal. + CheckStructural Flag = 1 << iota +) + +func Equal(ctx *OpContext, v, w Value, flags Flag) bool { + if x, ok := v.(*Vertex); ok { + return equalVertex(ctx, x, w, flags) + } + if y, ok := w.(*Vertex); ok { + return equalVertex(ctx, y, v, flags) + } + return equalTerminal(ctx, v, w, flags) +} + +func equalVertex(ctx *OpContext, x *Vertex, v Value, flags Flag) bool { + y, ok := v.(*Vertex) + if !ok { + return false + } + if x == y { + return true + } + xk := x.Kind() + yk := y.Kind() + + if xk != yk { + return false + } + + if len(x.Arcs) != len(y.Arcs) { + return false + } + + // TODO: this really should be subsumption. + if flags != 0 { + if x.IsClosedStruct() != y.IsClosedStruct() { + return false + } + if !equalClosed(ctx, x, y, flags) { + return false + } + } + +loop1: + for _, a := range x.Arcs { + for _, b := range y.Arcs { + if a.Label == b.Label { + if !Equal(ctx, a, b, flags) { + return false + } + continue loop1 + } + } + return false + } + + // We do not need to do the following check, because of the pigeon-hole principle. + // loop2: + // for _, b := range y.Arcs { + // for _, a := range x.Arcs { + // if a.Label == b.Label { + // continue loop2 + // } + // } + // return false + // } + + v, ok1 := x.BaseValue.(Value) + w, ok2 := y.BaseValue.(Value) + if !ok1 && !ok2 { + return true // both are struct or list. + } + + return equalTerminal(ctx, v, w, flags) +} + +// equalClosed tests if x and y have the same set of close information. 
+// TODO: the following refinements are possible: +// - unify optional fields and equate the optional fields +// - do the same for pattern constraints, where the pattern constraints +// are collated by pattern equality. +// - a further refinement would collate patterns by ranges. +// +// For all these refinements it would be necessary to have well-working +// structure sharing so as to not repeatedly recompute optional arcs. +func equalClosed(ctx *OpContext, x, y *Vertex, flags Flag) bool { + return verifyStructs(x, y, flags) && verifyStructs(y, x, flags) +} + +func verifyStructs(x, y *Vertex, flags Flag) bool { +outer: + for _, s := range x.Structs { + if (flags&IgnoreOptional != 0) && !s.StructLit.HasOptional() { + continue + } + if s.closeInfo == nil || s.closeInfo.span&DefinitionSpan == 0 { + if !s.StructLit.HasOptional() { + continue + } + } + for _, t := range y.Structs { + if s.StructLit == t.StructLit { + continue outer + } + } + return false + } + return true +} + +func equalTerminal(ctx *OpContext, v, w Value, flags Flag) bool { + if v == w { + return true + } + + switch x := v.(type) { + case *Num, *String, *Bool, *Bytes, *Null: + if b, ok := BinOp(ctx, EqualOp, v, w).(*Bool); ok { + return b.B + } + return false + + // TODO: for the remainder we are dealing with non-concrete values, so we + // could also just not bother. + + case *BoundValue: + if y, ok := w.(*BoundValue); ok { + return x.Op == y.Op && Equal(ctx, x.Value, y.Value, flags) + } + + case *BasicType: + if y, ok := w.(*BasicType); ok { + return x.K == y.K + } + + case *Conjunction: + y, ok := w.(*Conjunction) + if !ok || len(x.Values) != len(y.Values) { + return false + } + // always ordered the same + for i, xe := range x.Values { + if !Equal(ctx, xe, y.Values[i], flags) { + return false + } + } + return true + + case *Disjunction: + // The best way to compute this is with subsumption, but even that won't + // be too accurate. Assume structural equivalence for now. 
+ y, ok := w.(*Disjunction) + if !ok || len(x.Values) != len(y.Values) { + return false + } + for i, xe := range x.Values { + if !Equal(ctx, xe, y.Values[i], flags) { + return false + } + } + return true + + case *BuiltinValidator: + } + + return false +} diff --git a/vendor/cuelang.org/go/internal/core/adt/errors.go b/vendor/cuelang.org/go/internal/core/adt/errors.go new file mode 100644 index 0000000000..d5f6cfc17a --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/errors.go @@ -0,0 +1,324 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// This file contains error encodings. +// +// +// *Bottom: +// - an adt.Value +// - always belongs to a single vertex. +// - does NOT implement error +// - marks error code used for control flow +// +// errors.Error +// - CUE default error +// - implements error +// - tracks error locations +// - has error message details +// - supports multiple errors +// + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + cueformat "cuelang.org/go/cue/format" + "cuelang.org/go/cue/token" +) + +// ErrorCode indicates the type of error. The type of error may influence +// control flow. No other aspects of an error may influence control flow. +type ErrorCode int + +const ( + // An EvalError is a fatal evaluation error. + EvalError ErrorCode = iota + + // A UserError is a fatal error originating from the user. 
+ UserError + + // StructuralCycleError means a structural cycle was found. Structural + // cycles are permanent errors, but they are not passed up recursively, + // as a unification of a value with a structural cycle with one that + // doesn't may still give a useful result. + StructuralCycleError + + // IncompleteError means an evaluation could not complete because of + // insufficient information that may still be added later. + IncompleteError + + // A CycleError indicates a reference error. It is considered to be + // an incomplete error, as reference errors may be broken by providing + // a concrete value. + CycleError +) + +func (c ErrorCode) String() string { + switch c { + case EvalError: + return "eval" + case UserError: + return "user" + case StructuralCycleError: + return "structural cycle" + case IncompleteError: + return "incomplete" + case CycleError: + return "cycle" + } + return "unknown" +} + +// Bottom represents an error or bottom symbol. +// +// Although a Bottom node holds control data, it should not be created until the +// control information already resulted in an error. +type Bottom struct { + Src ast.Node + Err errors.Error + + Code ErrorCode + HasRecursive bool + ChildError bool // Err is the error of the child + NotExists bool // This error originated from a failed lookup. + // Value holds the computed value so far in case + Value Value +} + +func (x *Bottom) Source() ast.Node { return x.Src } +func (x *Bottom) Kind() Kind { return BottomKind } +func (x *Bottom) Specialize(k Kind) Value { return x } // XXX remove + +func (b *Bottom) IsIncomplete() bool { + if b == nil { + return false + } + return b.Code == IncompleteError || b.Code == CycleError +} + +// isLiteralBottom reports whether x is an error originating from a user. +func isLiteralBottom(x Expr) bool { + b, ok := x.(*Bottom) + return ok && b.Code == UserError +} + +// isError reports whether v is an error or nil. 
+func isError(v Value) bool { + if v == nil { + return true + } + _, ok := v.(*Bottom) + return ok +} + +// isIncomplete reports whether v is associated with an incomplete error. +func isIncomplete(v *Vertex) bool { + if v == nil { + return true + } + if b, ok := v.BaseValue.(*Bottom); ok { + return b.IsIncomplete() + } + return false +} + +// AddChildError updates x to record an error that occurred in one of +// its descendent arcs. The resulting error will record the worst error code of +// the current error or recursive error. +// +// If x is not already an error, the value is recorded in the error for +// reference. +// +func (v *Vertex) AddChildError(recursive *Bottom) { + v.ChildErrors = CombineErrors(nil, v.ChildErrors, recursive) + if recursive.IsIncomplete() { + return + } + x := v.BaseValue + err, _ := x.(*Bottom) + if err == nil { + v.BaseValue = &Bottom{ + Code: recursive.Code, + Value: v, + HasRecursive: true, + ChildError: true, + Err: recursive.Err, + } + return + } + + err.HasRecursive = true + if err.Code > recursive.Code { + err.Code = recursive.Code + } + + v.BaseValue = err +} + +// CombineErrors combines two errors that originate at the same Vertex. +func CombineErrors(src ast.Node, x, y Value) *Bottom { + a, _ := Unwrap(x).(*Bottom) + b, _ := Unwrap(y).(*Bottom) + + if a == b && isCyclePlaceholder(a) { + return a + } + switch { + case a != nil && b != nil: + case a != nil: + return a + case b != nil: + return b + default: + return nil + } + + if a.Code != b.Code { + if a.Code > b.Code { + a, b = b, a + } + + if b.Code >= IncompleteError { + return a + } + } + + return &Bottom{ + Src: src, + Err: errors.Append(a.Err, b.Err), + Code: a.Code, + } +} + +// A ValueError is returned as a result of evaluating a value. 
+type ValueError struct { + r Runtime + v *Vertex + pos token.Pos + auxpos []token.Pos + errors.Message +} + +func (v *ValueError) AddPosition(n Node) { + if n == nil { + return + } + if p := pos(n); p != token.NoPos { + for _, q := range v.auxpos { + if p == q { + return + } + } + v.auxpos = append(v.auxpos, p) + } +} + +func (v *ValueError) AddClosedPositions(c CloseInfo) { + for s := c.closeInfo; s != nil; s = s.parent { + if loc := s.location; loc != nil { + v.AddPosition(loc) + } + } +} + +func (c *OpContext) errNode() *Vertex { + return c.vertex +} + +// MarkPositions marks the current position stack. +func (c *OpContext) MarkPositions() int { + return len(c.positions) +} + +// ReleasePositions sets the position state to one from a call to MarkPositions. +func (c *OpContext) ReleasePositions(p int) { + c.positions = c.positions[:p] +} + +func (c *OpContext) AddPosition(n Node) { + if n != nil { + c.positions = append(c.positions, n) + } +} + +func (c *OpContext) Newf(format string, args ...interface{}) *ValueError { + return c.NewPosf(c.pos(), format, args...) 
+} + +func appendNodePositions(a []token.Pos, n Node) []token.Pos { + if p := pos(n); p != token.NoPos { + a = append(a, p) + } + if v, ok := n.(*Vertex); ok { + for _, c := range v.Conjuncts { + a = appendNodePositions(a, c.Elem()) + } + } + return a +} + +func (c *OpContext) NewPosf(p token.Pos, format string, args ...interface{}) *ValueError { + var a []token.Pos + if len(c.positions) > 0 { + a = make([]token.Pos, 0, len(c.positions)) + for _, n := range c.positions { + a = appendNodePositions(a, n) + } + } + for i, arg := range args { + switch x := arg.(type) { + case Node: + a = appendNodePositions(a, x) + args[i] = c.Str(x) + case ast.Node: + b, _ := cueformat.Node(x) + if p := x.Pos(); p != token.NoPos { + a = append(a, p) + } + args[i] = string(b) + case Feature: + args[i] = x.SelectorString(c.Runtime) + } + } + return &ValueError{ + r: c.Runtime, + v: c.errNode(), + pos: p, + auxpos: a, + Message: errors.NewMessage(format, args), + } +} + +func (e *ValueError) Error() string { + return errors.String(e) +} + +func (e *ValueError) Position() token.Pos { + return e.pos +} + +func (e *ValueError) InputPositions() (a []token.Pos) { + return e.auxpos +} + +func (e *ValueError) Path() (a []string) { + if e.v == nil { + return nil + } + for _, f := range appendPath(nil, e.v) { + a = append(a, f.SelectorString(e.r)) + } + return a +} diff --git a/vendor/cuelang.org/go/internal/core/adt/eval.go b/vendor/cuelang.org/go/internal/core/adt/eval.go new file mode 100644 index 0000000000..37e8cd9362 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/eval.go @@ -0,0 +1,2173 @@ +// Copyright 2021 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package eval contains the high level CUE evaluation strategy. +// +// CUE allows for a significant amount of freedom in order of evaluation due to +// the commutativity of the unification operation. This package implements one +// of the possible strategies. +package adt + +// TODO: +// - result should be nodeContext: this allows optionals info to be extracted +// and computed. +// + +import ( + "fmt" + "html/template" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO +// +// - Reuse work from previous cycles. For instance, if we can guarantee that a +// value is always correct for partial results, we can just process the arcs +// going from Partial to Finalized, without having to reevaluate the value. +// +// - Test closedness far more thoroughly. +// + +type Stats struct { + DisjunctCount int + UnifyCount int + + Freed int + Retained int + Reused int + Allocs int +} + +// Leaks reports the number of nodeContext structs leaked. These are typically +// benign, as they will just be garbage collected, as long as the pointer from +// the original nodes has been eliminated or the original nodes are also not +// referred to. But Leaks may have notable impact on performance, and thus +// should be avoided. 
+func (s *Stats) Leaks() int { + return s.Allocs + s.Reused - s.Freed +} + +var stats = template.Must(template.New("stats").Parse(`{{"" -}} + +Leaks: {{.Leaks}} +Freed: {{.Freed}} +Reused: {{.Reused}} +Allocs: {{.Allocs}} +Retain: {{.Retained}} + +Unifications: {{.UnifyCount}} +Disjuncts: {{.DisjunctCount}}`)) + +func (s *Stats) String() string { + buf := &strings.Builder{} + _ = stats.Execute(buf, s) + return buf.String() +} + +func (c *OpContext) Stats() *Stats { + return &c.stats +} + +// TODO: Note: NewContext takes essentially a cue.Value. By making this +// type more central, we can perhaps avoid context creation. + +// func NewContext(r Runtime, v *Vertex) *OpContext { +// e := NewUnifier(r) +// return e.NewContext(v) +// } + +var structSentinel = &StructMarker{} + +var incompleteSentinel = &Bottom{ + Code: IncompleteError, + Err: errors.Newf(token.NoPos, "incomplete"), +} + +// evaluate returns the evaluated value associated with v. It may return a +// partial result. That is, if v was not yet unified, it may return a +// concrete value that must be the result assuming the configuration has no +// errors. +// +// This semantics allows CUE to break reference cycles in a straightforward +// manner. +// +// Vertex v must still be evaluated at some point to catch the underlying +// error. +// +// TODO: return *Vertex +func (c *OpContext) evaluate(v *Vertex, state VertexStatus) Value { + if v.isUndefined() { + // Use node itself to allow for cycle detection. 
+ c.Unify(v, state) + } + + if n := v.state; n != nil { + if n.errs != nil && !n.errs.IsIncomplete() { + return n.errs + } + if n.scalar != nil && isCyclePlaceholder(v.BaseValue) { + return n.scalar + } + } + + switch x := v.BaseValue.(type) { + case *Bottom: + if x.IsIncomplete() { + c.AddBottom(x) + return nil + } + return x + + case nil: + if v.state != nil { + switch x := v.state.getValidators().(type) { + case Value: + return x + default: + w := *v + w.BaseValue = x + return &w + } + } + Assertf(false, "no BaseValue: state: %v; requested: %v", v.status, state) + } + + if v.status < Finalized && v.state != nil { + // TODO: errors are slightly better if we always add addNotify, but + // in this case it is less likely to cause a performance penalty. + // See https://cuelang.org/issue/661. It may be possible to + // relax this again once we have proper tests to prevent regressions of + // that issue. + if !v.state.done() || v.state.errs != nil { + v.state.addNotify(c.vertex) + } + } + + return v +} + +// Unify fully unifies all values of a Vertex to completion and stores +// the result in the Vertex. If unify was called on v before it returns +// the cached results. +func (c *OpContext) Unify(v *Vertex, state VertexStatus) { + // defer c.PopVertex(c.PushVertex(v)) + if Debug { + c.nest++ + c.Logf(v, "Unify") + defer func() { + c.Logf(v, "END Unify") + c.nest-- + }() + } + + // Ensure a node will always have a nodeContext after calling Unify if it is + // not yet Finalized. 
+ n := v.getNodeContext(c) + defer v.freeNode(n) + + if state <= v.Status() { + if v.Status() != Partial && state != Partial { + return + } + } + + switch v.Status() { + case Evaluating: + n.insertConjuncts() + return + + case EvaluatingArcs: + Assertf(v.status > 0, "unexpected status %d", v.status) + return + + case 0: + if v.Label.IsDef() { + v.Closed = true + } + + if v.Parent != nil { + if v.Parent.Closed { + v.Closed = true + } + } + + if p := v.Parent; p != nil && p.state != nil && v.Label.IsString() { + for _, s := range p.state.node.Structs { + if s.Disable { + continue + } + s.MatchAndInsert(n.ctx, v) + } + } + + if !n.checkClosed(state) { + return + } + + defer c.PopArc(c.PushArc(v)) + + c.stats.UnifyCount++ + + // Clear any remaining error. + if err := c.Err(); err != nil { + panic("uncaught error") + } + + // Set the cache to a cycle error to ensure a cyclic reference will result + // in an error if applicable. A cyclic error may be ignored for + // non-expression references. The cycle error may also be removed as soon + // as there is evidence what a correct value must be, but before all + // validation has taken place. + // + // TODO(cycle): having a more recursive algorithm would make this + // special cycle handling unnecessary. + v.BaseValue = cycle + + v.UpdateStatus(Evaluating) + + n.conjuncts = v.Conjuncts + n.insertConjuncts() + + fallthrough + + case Partial: + defer c.PopArc(c.PushArc(v)) + + v.status = Evaluating + + // Use maybeSetCache for cycle breaking + for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() { + } + + n.doNotify() + + if !n.done() { + switch { + case len(n.disjunctions) > 0 && isCyclePlaceholder(v.BaseValue): + // We disallow entering computations of disjunctions with + // incomplete data. 
+ if state == Finalized { + b := c.NewErrf("incomplete cause disjunction") + b.Code = IncompleteError + n.errs = CombineErrors(nil, n.errs, b) + v.SetValue(n.ctx, Finalized, b) + } else { + n.node.UpdateStatus(Partial) + } + return + + case state <= AllArcs: + n.node.UpdateStatus(Partial) + return + } + } + + if s := v.Status(); state <= s { + // We have found a partial result. There may still be errors + // down the line which may result from further evaluating this + // field, but that will be caught when evaluating this field + // for real. + + // This also covers the case where a recursive evaluation triggered + // this field to become finalized in the mean time. In that case + // we can avoid running another expandDisjuncts. + return + } + + // Disjunctions should always be finalized. If there are nested + // disjunctions the last one should be finalized. + disState := state + if len(n.disjunctions) > 0 && disState != Finalized { + disState = Finalized + } + n.expandDisjuncts(disState, n, maybeDefault, false, true) + + n.finalizeDisjuncts() + + switch len(n.disjuncts) { + case 0: + case 1: + x := n.disjuncts[0].result + x.state = nil + *v = x + + default: + d := n.createDisjunct() + v.BaseValue = d + // The conjuncts will have too much information. Better have no + // information than incorrect information. + for _, d := range d.Values { + // We clear the conjuncts for now. As these disjuncts are for API + // use only, we will fill them out when necessary (using Defaults). + d.Conjuncts = nil + + // TODO: use a more principled form of dereferencing. For instance, + // disjuncts could already be assumed to be the given Vertex, and + // the the main vertex could be dereferenced during evaluation. + for _, a := range d.Arcs { + for _, x := range a.Conjuncts { + // All the environments for embedded structs need to be + // dereferenced. 
+ for env := x.Env; env != nil && env.Vertex == v; env = env.Up { + env.Vertex = d + } + } + } + } + v.Arcs = nil + // v.Structs = nil // TODO: should we keep or discard the Structs? + // TODO: how to represent closedness information? Do we need it? + } + + // If the state has changed, it is because a disjunct has been run, or + // because a single disjunct has replaced it. Restore the old state as + // to not confuse memory management. + v.state = n + + // We don't do this in postDisjuncts, as it should only be done after + // completing all disjunctions. + if !n.done() { + if err := n.incompleteErrors(); err != nil { + b, _ := n.node.BaseValue.(*Bottom) + if b != err { + err = CombineErrors(n.ctx.src, b, err) + } + n.node.BaseValue = err + } + } + + if state != Finalized { + return + } + + if v.BaseValue == nil { + v.BaseValue = n.getValidators() + } + + // Free memory here? + v.UpdateStatus(Finalized) + + case AllArcs: + if !n.checkClosed(state) { + break + } + + defer c.PopArc(c.PushArc(v)) + + n.completeArcs(state) + + case Finalized: + } +} + +// insertConjuncts inserts conjuncts previously uninserted. +func (n *nodeContext) insertConjuncts() { + for len(n.conjuncts) > 0 { + nInfos := len(n.node.Structs) + p := &n.conjuncts[0] + n.conjuncts = n.conjuncts[1:] + n.addExprConjunct(*p) + + // Record the OptionalTypes for all structs that were inferred by this + // Conjunct. This information can be used by algorithms such as trim. + for i := nInfos; i < len(n.node.Structs); i++ { + p.CloseInfo.FieldTypes |= n.node.Structs[i].types + } + } +} + +// finalizeDisjuncts: incomplete errors are kept around and not removed early. +// This call filters the incomplete errors and removes them +// +// This also collects all errors of empty disjunctions. These cannot be +// collected during the finalization state of individual disjuncts. Care should +// be taken to only call this after all disjuncts have been finalized. 
+func (n *nodeContext) finalizeDisjuncts() { + a := n.disjuncts + if len(a) == 0 { + return + } + k := 0 + for i, d := range a { + switch d.finalDone() { + case true: + a[k], a[i] = d, a[k] + k++ + default: + if err := d.incompleteErrors(); err != nil { + n.disjunctErrs = append(n.disjunctErrs, err) + } + } + d.free() + } + if k == 0 { + n.makeError() + } + n.disjuncts = a[:k] +} + +func (n *nodeContext) doNotify() { + if n.errs == nil || len(n.notify) == 0 { + return + } + for _, v := range n.notify { + if v.state == nil { + if b, ok := v.BaseValue.(*Bottom); ok { + v.BaseValue = CombineErrors(nil, b, n.errs) + } else { + v.BaseValue = n.errs + } + } else { + v.state.addBottom(n.errs) + } + } + n.notify = n.notify[:0] +} + +func (n *nodeContext) postDisjunct(state VertexStatus) { + ctx := n.ctx + + for { + // Use maybeSetCache for cycle breaking + for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() { + } + + if aList, id := n.addLists(); aList != nil { + n.updateNodeType(ListKind, aList, id) + } else { + break + } + } + + if n.aStruct != nil { + n.updateNodeType(StructKind, n.aStruct, n.aStructID) + } + + switch err := n.getErr(); { + case err != nil: + n.node.BaseValue = err + n.errs = nil + + default: + if isCyclePlaceholder(n.node.BaseValue) { + if !n.done() { + n.node.BaseValue = n.incompleteErrors() + } else { + n.node.BaseValue = nil + } + } + // TODO: this ideally should be done here. However, doing so causes + // a somewhat more aggressive cutoff in disjunction cycles, which cause + // some incompatibilities. Fix in another CL. + // + // else if !n.done() { + // n.expandOne() + // if err := n.incompleteErrors(); err != nil { + // n.node.BaseValue = err + // } + // } + + // We are no longer evaluating. + // n.node.UpdateStatus(Partial) + n.node.UpdateStatus(Evaluating) + + // Either set to Conjunction or error. + // TODO: verify and simplify the below code to determine whether + // something is a struct. 
+ markStruct := false + if n.aStruct != nil { + markStruct = true + } else if len(n.node.Structs) > 0 { + markStruct = n.kind&StructKind != 0 && !n.hasTop + } + v := n.node.Value() + if n.node.BaseValue == nil && markStruct { + n.node.BaseValue = &StructMarker{} + v = n.node + } + if v != nil && IsConcrete(v) { + // Also check when we already have errors as we may find more + // serious errors and would like to know about all errors anyway. + + if n.lowerBound != nil { + if b := ctx.Validate(n.lowerBound, v); b != nil { + // TODO(errors): make Validate return boolean and generate + // optimized conflict message. Also track and inject IDs + // to determine origin location.s + if e, _ := b.Err.(*ValueError); e != nil { + e.AddPosition(n.lowerBound) + e.AddPosition(v) + } + n.addBottom(b) + } + } + if n.upperBound != nil { + if b := ctx.Validate(n.upperBound, v); b != nil { + // TODO(errors): make Validate return boolean and generate + // optimized conflict message. Also track and inject IDs + // to determine origin location.s + if e, _ := b.Err.(*ValueError); e != nil { + e.AddPosition(n.upperBound) + e.AddPosition(v) + } + n.addBottom(b) + } + } + // MOVE BELOW + // TODO(perf): only delay processing of actual non-monotonic checks. + skip := n.skipNonMonotonicChecks() + if v := n.node.Value(); v != nil && IsConcrete(v) && !skip { + for _, v := range n.checks { + // TODO(errors): make Validate return bottom and generate + // optimized conflict message. Also track and inject IDs + // to determine origin location.s + if b := ctx.Validate(v, n.node); b != nil { + n.addBottom(b) + } + } + } + } else if state == Finalized { + n.node.BaseValue = n.getValidators() + } + + if v == nil { + break + } + + switch { + case v.Kind() == ListKind: + for _, a := range n.node.Arcs { + if a.Label.Typ() == StringLabel { + n.addErr(ctx.Newf("list may not have regular fields")) + // TODO(errors): add positions for list and arc definitions. 
+ + } + } + + // case !isStruct(n.node) && v.Kind() != BottomKind: + // for _, a := range n.node.Arcs { + // if a.Label.IsRegular() { + // n.addErr(errors.Newf(token.NoPos, + // // TODO(errors): add positions of non-struct values and arcs. + // "cannot combine scalar values with arcs")) + // } + // } + } + } + + if err := n.getErr(); err != nil { + if b, _ := n.node.BaseValue.(*Bottom); b != nil { + err = CombineErrors(nil, b, err) + } + n.node.BaseValue = err + // TODO: add return: if evaluation of arcs is important it can be done + // later. Logically we're done. + } + + n.completeArcs(state) +} + +func (n *nodeContext) incompleteErrors() *Bottom { + // collect incomplete errors. + var err *Bottom // n.incomplete + for _, d := range n.dynamicFields { + err = CombineErrors(nil, err, d.err) + } + for _, c := range n.comprehensions { + err = CombineErrors(nil, err, c.err) + } + for _, x := range n.exprs { + err = CombineErrors(nil, err, x.err) + } + if err == nil { + // safeguard. + err = incompleteSentinel + } + return err +} + +// TODO(perf): ideally we should always perform a closedness check if +// state is Finalized. This is currently not possible when computing a +// partial disjunction as the closedness information is not yet +// complete, possibly leading to a disjunct to be rejected prematurely. +// It is probably possible to fix this if we could add StructInfo +// structures demarked per conjunct. +// +// In practice this should not be a problem: when disjuncts originate +// from the same disjunct, they will have the same StructInfos, and thus +// Equal is able to equate them even in the precense of optional field. +// In general, combining any limited set of disjuncts will soon reach +// a fixed point where duplicate elements can be eliminated this way. +// +// Note that not checking closedness is irrelevant for disjunctions of +// scalars. This means it also doesn't hurt performance where structs +// have a discriminator field (e.g. Kubernetes). 
We should take care, +// though, that any potential performance issues are eliminated for +// Protobuf-like oneOf fields. +func (n *nodeContext) checkClosed(state VertexStatus) bool { + ignore := state != Finalized || n.skipNonMonotonicChecks() + + v := n.node + if !v.Label.IsInt() && v.Parent != nil && !ignore { + ctx := n.ctx + // Visit arcs recursively to validate and compute error. + if _, err := verifyArc2(ctx, v.Label, v, v.Closed); err != nil { + // Record error in child node to allow recording multiple + // conflicts at the appropriate place, to allow valid fields to + // be represented normally and, most importantly, to avoid + // recursive processing of a disallowed field. + v.SetValue(ctx, Finalized, err) + return false + } + } + return true +} + +func (n *nodeContext) completeArcs(state VertexStatus) { + if DebugSort > 0 { + DebugSortArcs(n.ctx, n.node) + } + + if state <= AllArcs { + n.node.UpdateStatus(AllArcs) + return + } + + n.node.UpdateStatus(EvaluatingArcs) + + ctx := n.ctx + + if !assertStructuralCycle(n) { + // Visit arcs recursively to validate and compute error. + for _, a := range n.node.Arcs { + if a.nonMonotonicInsertGen >= a.nonMonotonicLookupGen && a.nonMonotonicLookupGen > 0 { + err := ctx.Newf( + "cycle: field inserted by if clause that was previously evaluated by another if clause: %s", a.Label) + err.AddPosition(n.node) + n.node.BaseValue = &Bottom{Err: err} + } else if a.nonMonotonicReject { + err := ctx.Newf( + "cycle: field was added after an if clause evaluated it: %s", + a.Label) + err.AddPosition(n.node) + n.node.BaseValue = &Bottom{Err: err} + } + + // Call UpdateStatus here to be absolutely sure the status is set + // correctly and that we are not regressing. + n.node.UpdateStatus(EvaluatingArcs) + ctx.Unify(a, state) + // Don't set the state to Finalized if the child arcs are not done. 
+ if state == Finalized && a.status < Finalized { + state = AllArcs + } + if err, _ := a.BaseValue.(*Bottom); err != nil { + n.node.AddChildError(err) + } + } + } + + n.node.UpdateStatus(state) +} + +func assertStructuralCycle(n *nodeContext) bool { + if cyclic := n.hasCycle && !n.hasNonCycle; cyclic { + n.node.BaseValue = CombineErrors(nil, + n.node.Value(), + &Bottom{ + Code: StructuralCycleError, + Err: n.ctx.Newf("structural cycle"), + Value: n.node.Value(), + // TODO: probably, this should have the referenced arc. + }) + // Don't process Arcs. This is mostly to ensure that no Arcs with + // an Unprocessed status remain in the output. + n.node.Arcs = nil + return true + } + return false +} + +// TODO: this is now a sentinel. Use a user-facing error that traces where +// the cycle originates. +var cycle = &Bottom{ + Err: errors.Newf(token.NoPos, "cycle error"), + Code: CycleError, +} + +func isCyclePlaceholder(v BaseValue) bool { + return v == cycle +} + +func (n *nodeContext) createDisjunct() *Disjunction { + a := make([]*Vertex, len(n.disjuncts)) + p := 0 + hasDefaults := false + for i, x := range n.disjuncts { + v := new(Vertex) + *v = x.result + v.state = nil + switch x.defaultMode { + case isDefault: + a[i] = a[p] + a[p] = v + p++ + hasDefaults = true + + case notDefault: + hasDefaults = true + fallthrough + case maybeDefault: + a[i] = v + } + } + // TODO: disambiguate based on concrete values. + // TODO: consider not storing defaults. + // if p > 0 { + // a = a[:p] + // } + return &Disjunction{ + Values: a, + NumDefaults: p, + HasDefaults: hasDefaults, + } +} + +type arcKey struct { + arc *Vertex + id CloseInfo +} + +// A nodeContext is used to collate all conjuncts of a value to facilitate +// unification. Conceptually order of unification does not matter. However, +// order has relevance when performing checks of non-monotic properities. Such +// checks should only be performed once the full value is known. 
+type nodeContext struct { + nextFree *nodeContext + refCount int + + ctx *OpContext + node *Vertex + + // usedArcs is a list of arcs that were looked up during non-monotonic operations, but do not exist yet. + usedArcs []*Vertex + + // TODO: (this is CL is first step) + // filter *Vertex a subset of composite with concrete fields for + // bloom-like filtering of disjuncts. We should first verify, however, + // whether some breath-first search gives sufficient performance, as this + // should already ensure a quick-fail for struct disjunctions with + // discriminators. + + arcMap []arcKey + + // snapshot holds the last value of the vertex before calling postDisjunct. + snapshot Vertex + + // Result holds the last evaluated value of the vertex after calling + // postDisjunct. + result Vertex + + // Current value (may be under construction) + scalar Value // TODO: use Value in node. + scalarID CloseInfo + + // Concrete conjuncts + kind Kind + kindExpr Expr // expr that adjust last value (for error reporting) + kindID CloseInfo // for error tracing + lowerBound *BoundValue // > or >= + upperBound *BoundValue // < or <= + checks []Validator // BuiltinValidator, other bound values. + errs *Bottom + + // Conjuncts holds a reference to the Vertex Arcs that still need + // processing. It does NOT need to be copied. + conjuncts []Conjunct + + // notify is used to communicate errors in cyclic dependencies. + // TODO: also use this to communicate increasingly more concrete values. 
+ notify []*Vertex + + // Struct information + dynamicFields []envDynamic + comprehensions []envYield + aStruct Expr + aStructID CloseInfo + + // Expression conjuncts + lists []envList + vLists []*Vertex + exprs []envExpr + + hasTop bool + hasCycle bool // has conjunct with structural cycle + hasNonCycle bool // has conjunct without structural cycle + + // Disjunction handling + disjunctions []envDisjunct + + // usedDefault indicates the for each of possibly multiple parent + // disjunctions whether it is unified with a default disjunct or not. + // This is then later used to determine whether a disjunction should + // be treated as a marked disjunction. + usedDefault []defaultInfo + + defaultMode defaultMode + disjuncts []*nodeContext + buffer []*nodeContext + disjunctErrs []*Bottom +} + +type defaultInfo struct { + // parentMode indicates whether this values was used as a default value, + // based on the parent mode. + parentMode defaultMode + + // The result of default evaluation for a nested disjunction. + nestedMode defaultMode + + origMode defaultMode +} + +func (n *nodeContext) addNotify(v *Vertex) { + if v != nil { + n.notify = append(n.notify, v) + } +} + +func (n *nodeContext) clone() *nodeContext { + d := n.ctx.newNodeContext(n.node) + + d.refCount++ + + d.ctx = n.ctx + d.node = n.node + + d.scalar = n.scalar + d.scalarID = n.scalarID + d.kind = n.kind + d.kindExpr = n.kindExpr + d.kindID = n.kindID + d.aStruct = n.aStruct + d.aStructID = n.aStructID + d.hasTop = n.hasTop + + d.lowerBound = n.lowerBound + d.upperBound = n.upperBound + d.errs = n.errs + d.hasTop = n.hasTop + d.hasCycle = n.hasCycle + d.hasNonCycle = n.hasNonCycle + + // d.arcMap = append(d.arcMap, n.arcMap...) // XXX add? + // d.usedArcs = append(d.usedArcs, n.usedArcs...) // XXX: add? + d.notify = append(d.notify, n.notify...) + d.checks = append(d.checks, n.checks...) + d.dynamicFields = append(d.dynamicFields, n.dynamicFields...) 
+ d.comprehensions = append(d.comprehensions, n.comprehensions...) + d.lists = append(d.lists, n.lists...) + d.vLists = append(d.vLists, n.vLists...) + d.exprs = append(d.exprs, n.exprs...) + d.usedDefault = append(d.usedDefault, n.usedDefault...) + + // No need to clone d.disjunctions + + return d +} + +func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { + if n := c.freeListNode; n != nil { + c.stats.Reused++ + c.freeListNode = n.nextFree + + *n = nodeContext{ + ctx: c, + node: node, + kind: TopKind, + usedArcs: n.usedArcs[:0], + arcMap: n.arcMap[:0], + notify: n.notify[:0], + checks: n.checks[:0], + dynamicFields: n.dynamicFields[:0], + comprehensions: n.comprehensions[:0], + lists: n.lists[:0], + vLists: n.vLists[:0], + exprs: n.exprs[:0], + disjunctions: n.disjunctions[:0], + usedDefault: n.usedDefault[:0], + disjunctErrs: n.disjunctErrs[:0], + disjuncts: n.disjuncts[:0], + buffer: n.buffer[:0], + } + + return n + } + c.stats.Allocs++ + + return &nodeContext{ + ctx: c, + node: node, + kind: TopKind, + } +} + +func (v *Vertex) getNodeContext(c *OpContext) *nodeContext { + if v.state == nil { + if v.status == Finalized { + return nil + } + v.state = c.newNodeContext(v) + } else if v.state.node != v { + panic("getNodeContext: nodeContext out of sync") + } + v.state.refCount++ + return v.state +} + +func (v *Vertex) freeNode(n *nodeContext) { + if n == nil { + return + } + if n.node != v { + panic("freeNode: unpaired free") + } + if v.state != nil && v.state != n { + panic("freeNode: nodeContext out of sync") + } + if n.refCount--; n.refCount == 0 { + if v.status == Finalized { + v.freeNodeState() + } else { + n.ctx.stats.Retained++ + } + } +} + +func (v *Vertex) freeNodeState() { + if v.state == nil { + return + } + state := v.state + v.state = nil + + state.ctx.freeNodeContext(state) +} + +func (n *nodeContext) free() { + if n.refCount--; n.refCount == 0 { + n.ctx.freeNodeContext(n) + } +} + +func (c *OpContext) freeNodeContext(n *nodeContext) { + 
c.stats.Freed++ + n.nextFree = c.freeListNode + c.freeListNode = n + n.node = nil + n.refCount = 0 +} + +// TODO(perf): return a dedicated ConflictError that can track original +// positions on demand. +func (n *nodeContext) reportConflict( + v1, v2 Node, + k1, k2 Kind, + ids ...CloseInfo) { + + ctx := n.ctx + + var err *ValueError + if k1 == k2 { + err = ctx.NewPosf(token.NoPos, "conflicting values %s and %s", v1, v2) + } else { + err = ctx.NewPosf(token.NoPos, + "conflicting values %s and %s (mismatched types %s and %s)", + v1, v2, k1, k2) + } + + err.AddPosition(v1) + err.AddPosition(v2) + for _, id := range ids { + err.AddClosedPositions(id) + } + + n.addErr(err) +} + +// reportFieldMismatch reports the mixture of regular fields with non-struct +// values. Either s or f needs to be given. +func (n *nodeContext) reportFieldMismatch( + p token.Pos, + s *StructLit, + f Feature, + scalar Expr, + id ...CloseInfo) { + + ctx := n.ctx + + if f == InvalidLabel { + for _, a := range s.Decls { + if x, ok := a.(*Field); ok && x.Label.IsRegular() { + f = x.Label + p = pos(x) + break + } + } + if f == InvalidLabel { + n.reportConflict(scalar, s, n.kind, StructKind, id...) + return + } + } + + err := ctx.NewPosf(p, "cannot combine regular field %q with %v", f, scalar) + + if s != nil { + err.AddPosition(s) + } + + for _, ci := range id { + err.AddClosedPositions(ci) + } + + n.addErr(err) +} + +func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool { + ctx := n.ctx + kind := n.kind & k + + switch { + case n.kind == BottomKind, + k == BottomKind: + return false + + case kind != BottomKind: + + // TODO: we could consider changing the reporting for structs, but this + // makes only sense in case they are for embeddings. Otherwise the type + // of a struct is more relevant for the failure. 
+ // case k == StructKind: + // s, _ := v.(*StructLit) + // n.reportFieldMismatch(token.NoPos, s, 0, n.kindExpr, id, n.kindID) + + case n.kindExpr != nil: + n.reportConflict(n.kindExpr, v, n.kind, k, n.kindID, id) + + default: + n.addErr(ctx.Newf( + "conflicting value %s (mismatched types %s and %s)", + v, n.kind, k)) + } + + if n.kind != kind || n.kindExpr == nil { + n.kindExpr = v + } + n.kind = kind + return kind != BottomKind +} + +func (n *nodeContext) done() bool { + return len(n.dynamicFields) == 0 && + len(n.comprehensions) == 0 && + len(n.exprs) == 0 +} + +// finalDone is like done, but allows for cycle errors, which can be ignored +// as they essentially indicate a = a & _. +func (n *nodeContext) finalDone() bool { + for _, x := range n.exprs { + if x.err.Code != CycleError { + return false + } + } + return len(n.dynamicFields) == 0 && len(n.comprehensions) == 0 +} + +// hasErr is used to determine if an evaluation path, for instance a single +// path after expanding all disjunctions, has an error. +func (n *nodeContext) hasErr() bool { + if n.node.ChildErrors != nil { + return true + } + if n.node.Status() > Evaluating && n.node.IsErr() { + return true + } + return n.ctx.HasErr() || n.errs != nil +} + +func (n *nodeContext) getErr() *Bottom { + n.errs = CombineErrors(nil, n.errs, n.ctx.Err()) + return n.errs +} + +// getValidators sets the vertex' Value in case there was no concrete value. +func (n *nodeContext) getValidators() BaseValue { + ctx := n.ctx + + a := []Value{} + // if n.node.Value != nil { + // a = append(a, n.node.Value) + // } + kind := TopKind + if n.lowerBound != nil { + a = append(a, n.lowerBound) + kind &= n.lowerBound.Kind() + } + if n.upperBound != nil { + a = append(a, n.upperBound) + kind &= n.upperBound.Kind() + } + for _, c := range n.checks { + // Drop !=x if x is out of bounds with another bound. 
+ if b, _ := c.(*BoundValue); b != nil && b.Op == NotEqualOp { + if n.upperBound != nil && + SimplifyBounds(ctx, n.kind, n.upperBound, b) != nil { + continue + } + if n.lowerBound != nil && + SimplifyBounds(ctx, n.kind, n.lowerBound, b) != nil { + continue + } + } + a = append(a, c) + kind &= c.Kind() + } + + if kind&^n.kind != 0 { + a = append(a, &BasicType{ + Src: n.kindExpr.Source(), // TODO:Is this always a BasicType? + K: n.kind, + }) + } + + var v BaseValue + switch len(a) { + case 0: + // Src is the combined input. + v = &BasicType{K: n.kind} + + case 1: + v = a[0].(Value) // remove cast + + default: + v = &Conjunction{Values: a} + } + + return v +} + +// TODO: this function can probably go as this is now handled in the nodeContext. +func (n *nodeContext) maybeSetCache() { + if n.node.Status() > Partial { // n.node.BaseValue != nil + return + } + if n.scalar != nil { + n.node.BaseValue = n.scalar + } + // NOTE: this is now handled by associating the nodeContext + // if n.errs != nil { + // n.node.SetValue(n.ctx, Partial, n.errs) + // } +} + +type envExpr struct { + c Conjunct + err *Bottom +} + +type envDynamic struct { + env *Environment + field *DynamicField + id CloseInfo + err *Bottom +} + +type envList struct { + env *Environment + list *ListLit + n int64 // recorded length after evaluator + elipsis *Ellipsis + id CloseInfo +} + +func (n *nodeContext) addBottom(b *Bottom) { + n.errs = CombineErrors(nil, n.errs, b) + // TODO(errors): consider doing this + // n.kindExpr = n.errs + // n.kind = 0 +} + +func (n *nodeContext) addErr(err errors.Error) { + if err != nil { + n.addBottom(&Bottom{Err: err}) + } +} + +// addExprConjuncts will attempt to evaluate an Expr and insert the value +// into the nodeContext if successful or queue it for later evaluation if it is +// incomplete or is not value. 
+func (n *nodeContext) addExprConjunct(v Conjunct) { + env := v.Env + id := v.CloseInfo + + switch x := v.Elem().(type) { + case *Vertex: + if x.IsData() { + n.addValueConjunct(env, x, id) + } else { + n.addVertexConjuncts(v, x, true) + } + + case Value: + n.addValueConjunct(env, x, id) + + case *BinaryExpr: + if x.Op == AndOp { + n.addExprConjunct(MakeConjunct(env, x.X, id)) + n.addExprConjunct(MakeConjunct(env, x.Y, id)) + } else { + n.evalExpr(v) + } + + case *StructLit: + n.addStruct(env, x, id) + + case *ListLit: + childEnv := &Environment{ + Up: env, + Vertex: n.node, + } + if env != nil { + childEnv.Cyclic = env.Cyclic + childEnv.Deref = env.Deref + } + n.lists = append(n.lists, envList{env: childEnv, list: x, id: id}) + + case *DisjunctionExpr: + n.addDisjunction(env, x, id) + + default: + // Must be Resolver or Evaluator. + n.evalExpr(v) + } +} + +// evalExpr is only called by addExprConjunct. If an error occurs, it records +// the error in n and returns nil. +func (n *nodeContext) evalExpr(v Conjunct) { + // Require an Environment. + ctx := n.ctx + + closeID := v.CloseInfo + + // TODO: see if we can do without these counters. + for _, d := range v.Env.Deref { + d.EvalCount++ + } + for _, d := range v.Env.Cycles { + d.SelfCount++ + } + defer func() { + for _, d := range v.Env.Deref { + d.EvalCount-- + } + for _, d := range v.Env.Cycles { + d.SelfCount++ + } + }() + + switch x := v.Expr().(type) { + case Resolver: + arc, err := ctx.Resolve(v.Env, x) + if err != nil && !err.IsIncomplete() { + n.addBottom(err) + break + } + if arc == nil { + n.exprs = append(n.exprs, envExpr{v, err}) + break + } + + n.addVertexConjuncts(v, arc, false) + + case Evaluator: + // Interpolation, UnaryExpr, BinaryExpr, CallExpr + // Could be unify? 
+ val := ctx.evaluateRec(v.Env, v.Expr(), Partial) + if b, ok := val.(*Bottom); ok && b.IsIncomplete() { + n.exprs = append(n.exprs, envExpr{v, b}) + break + } + + if v, ok := val.(*Vertex); ok { + // Handle generated disjunctions (as in the 'or' builtin). + // These come as a Vertex, but should not be added as a value. + b, ok := v.BaseValue.(*Bottom) + if ok && b.IsIncomplete() && len(v.Conjuncts) > 0 { + for _, c := range v.Conjuncts { + c.CloseInfo = closeID + n.addExprConjunct(c) + } + break + } + } + + // TODO: also to through normal Vertex handling here. At the moment + // addValueConjunct handles StructMarker.NeedsClose, as this is always + // only needed when evaluation an Evaluator, and not a Resolver. + // The two code paths should ideally be merged once this separate + // mechanism is eliminated. + // + // if arc, ok := val.(*Vertex); ok && !arc.IsData() { + // n.addVertexConjuncts(v.Env, closeID, v.Expr(), arc) + // break + // } + + // TODO: insert in vertex as well + n.addValueConjunct(v.Env, val, closeID) + + default: + panic(fmt.Sprintf("unknown expression of type %T", x)) + } +} + +func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) { + closeInfo := c.CloseInfo + + // We need to ensure that each arc is only unified once (or at least) a + // bounded time, witch each conjunct. Comprehensions, for instance, may + // distribute a value across many values that get unified back into the + // same value. If such a value is a disjunction, than a disjunction of N + // disjuncts will result in a factor N more unifications for each + // occurrence of such value, resulting in exponential running time. This + // is especially common values that are used as a type. + // + // However, unification is idempotent, so each such conjunct only needs + // to be unified once. This cache checks for this and prevents an + // exponential blowup in such case. + // + // TODO(perf): this cache ensures the conjuncts of an arc at most once + // per ID. 
However, we really need to add the conjuncts of an arc only + // once total, and then add the close information once per close ID + // (pointer can probably be shared). Aside from being more performant, + // this is probably the best way to guarantee that conjunctions are + // linear in this case. + key := arcKey{arc, closeInfo} + for _, k := range n.arcMap { + if key == k { + return + } + } + n.arcMap = append(n.arcMap, key) + + env := c.Env + // Pass detection of structural cycles from parent to children. + cyclic := false + if env != nil { + // If a reference is in a tainted set, so is the value it refers to. + cyclic = env.Cyclic + } + + status := arc.Status() + + switch status { + case Evaluating: + // Reference cycle detected. We have reached a fixed point and + // adding conjuncts at this point will not change the value. Also, + // continuing to pursue this value will result in an infinite loop. + + // TODO: add a mechanism so that the computation will only have to + // be done once? + + if arc == n.node { + // TODO: we could use node sharing here. This may avoid an + // exponential blowup during evaluation, like is possible with + // YAML. + return + } + + case EvaluatingArcs: + // Structural cycle detected. Continue evaluation as usual, but + // keep track of whether any other conjuncts without structural + // cycles are added. If not, evaluation of child arcs will end + // with this node. + + // For the purpose of determining whether at least one non-cyclic + // conjuncts exists, we consider all conjuncts of a cyclic conjuncts + // also cyclic. + + cyclic = true + n.hasCycle = true + + // As the EvaluatingArcs mechanism bypasses the self-reference + // mechanism, we need to separately keep track of it here. + // If this (originally) is a self-reference node, adding them + // will result in recursively adding the same reference. For this + // we also mark the node as evaluating. 
+ if arc.SelfCount > 0 { + return + } + + // This count is added for values that are directly added below. + // The count is handled separately for delayed values. + arc.SelfCount++ + defer func() { arc.SelfCount-- }() + } + + // Performance: the following if check filters cases that are not strictly + // necessary for correct functioning. Not updating the closeInfo may cause + // some position information to be lost for top-level positions of merges + // resulting form APIs. These tend to be fairly uninteresting. + // At the same time, this optimization may prevent considerable slowdown + // in case an API does many calls to Unify. + x := c.Expr() + if !inline || arc.IsClosedStruct() || arc.IsClosedList() { + closeInfo = closeInfo.SpawnRef(arc, IsDef(x), x) + } + + if arc.status == 0 && !inline { + // This is a rare condition, but can happen in certain + // evaluation orders. Unfortunately, adding this breaks + // resolution of cyclic mutually referring disjunctions. But it + // is necessary to prevent lookups in unevaluated structs. + // TODO(cycles): this can probably most easily be fixed with a + // having a more recursive implementation. + n.ctx.Unify(arc, Partial) + } + + for _, c := range arc.Conjuncts { + var a []*Vertex + if env != nil { + a = env.Deref + } + if inline { + c = updateCyclic(c, cyclic, nil, nil) + } else { + c = updateCyclic(c, cyclic, arc, a) + } + + // Note that we are resetting the tree here. We hereby assume that + // closedness conflicts resulting from unifying the referenced arc were + // already caught there and that we can ignore further errors here. + c.CloseInfo = closeInfo + n.addExprConjunct(c) + } +} + +// isDef reports whether an expressions is a reference that references a +// definition anywhere in its selection path. +// +// TODO(performance): this should be merged with resolve(). But for now keeping +// this code isolated makes it easier to see what it is for. 
+func isDef(x Expr) bool { + switch r := x.(type) { + case *FieldReference: + return r.Label.IsDef() + + case *SelectorExpr: + if r.Sel.IsDef() { + return true + } + return isDef(r.X) + + case *IndexExpr: + return isDef(r.X) + } + return false +} + +// updateCyclicStatus looks for proof of non-cyclic conjuncts to override +// a structural cycle. +func (n *nodeContext) updateCyclicStatus(env *Environment) { + if env == nil || !env.Cyclic { + n.hasNonCycle = true + } +} + +func updateCyclic(c Conjunct, cyclic bool, deref *Vertex, a []*Vertex) Conjunct { + env := c.Env + switch { + case env == nil: + if !cyclic && deref == nil { + return c + } + env = &Environment{Cyclic: cyclic} + case deref == nil && env.Cyclic == cyclic && len(a) == 0: + return c + default: + // The conjunct may still be in use in other fields, so we should + // make a new copy to mark Cyclic only for this case. + e := *env + e.Cyclic = e.Cyclic || cyclic + env = &e + } + if deref != nil || len(a) > 0 { + cp := make([]*Vertex, 0, len(a)+1) + cp = append(cp, a...) + if deref != nil { + cp = append(cp, deref) + } + env.Deref = cp + } + if deref != nil { + env.Cycles = append(env.Cycles, deref) + } + return MakeConjunct(env, c.Elem(), c.CloseInfo) +} + +func (n *nodeContext) addValueConjunct(env *Environment, v Value, id CloseInfo) { + n.updateCyclicStatus(env) + + ctx := n.ctx + + if x, ok := v.(*Vertex); ok { + if m, ok := x.BaseValue.(*StructMarker); ok { + n.aStruct = x + n.aStructID = id + if m.NeedClose { + id = id.SpawnRef(x, IsDef(x), x) + id.IsClosed = true + } + } + + cyclic := env != nil && env.Cyclic + + if !x.IsData() { + // TODO: this really shouldn't happen anymore. + if isComplexStruct(ctx, x) { + // This really shouldn't happen, but just in case. 
+ n.addVertexConjuncts(MakeConjunct(env, x, id), x, true) + return + } + + for _, c := range x.Conjuncts { + c = updateCyclic(c, cyclic, nil, nil) + c.CloseInfo = id + n.addExprConjunct(c) // TODO: Pass from eval + } + return + } + + // TODO: evaluate value? + switch v := x.BaseValue.(type) { + default: + panic(fmt.Sprintf("invalid type %T", x.BaseValue)) + + case *ListMarker: + n.vLists = append(n.vLists, x) + return + + case *StructMarker: + + case Value: + n.addValueConjunct(env, v, id) + } + + if len(x.Arcs) == 0 { + return + } + + s := &StructLit{} + + // Keep ordering of Go struct for topological sort. + n.node.AddStruct(s, env, id) + n.node.Structs = append(n.node.Structs, x.Structs...) + + for _, a := range x.Arcs { + // TODO(errors): report error when this is a regular field. + c := MakeConjunct(nil, a, id) + c = updateCyclic(c, cyclic, nil, nil) + n.insertField(a.Label, c) + s.MarkField(a.Label) + } + return + } + + switch b := v.(type) { + case *Bottom: + n.addBottom(b) + return + case *Builtin: + if v := b.BareValidator(); v != nil { + n.addValueConjunct(env, v, id) + return + } + } + + if !n.updateNodeType(v.Kind(), v, id) { + return + } + + switch x := v.(type) { + case *Disjunction: + n.addDisjunctionValue(env, x, id) + + case *Conjunction: + for _, x := range x.Values { + n.addValueConjunct(env, x, id) + } + + case *Top: + n.hasTop = true + + case *BasicType: + // handled above + + case *BoundValue: + switch x.Op { + case LessThanOp, LessEqualOp: + if y := n.upperBound; y != nil { + n.upperBound = nil + v := SimplifyBounds(ctx, n.kind, x, y) + if err := valueError(v); err != nil { + err.AddPosition(v) + err.AddPosition(n.upperBound) + err.AddClosedPositions(id) + } + n.addValueConjunct(env, v, id) + return + } + n.upperBound = x + + case GreaterThanOp, GreaterEqualOp: + if y := n.lowerBound; y != nil { + n.lowerBound = nil + v := SimplifyBounds(ctx, n.kind, x, y) + if err := valueError(v); err != nil { + err.AddPosition(v) + 
err.AddPosition(n.lowerBound) + err.AddClosedPositions(id) + } + n.addValueConjunct(env, v, id) + return + } + n.lowerBound = x + + case EqualOp, NotEqualOp, MatchOp, NotMatchOp: + // This check serves as simplifier, but also to remove duplicates. + k := 0 + match := false + for _, c := range n.checks { + if y, ok := c.(*BoundValue); ok { + switch z := SimplifyBounds(ctx, n.kind, x, y); { + case z == y: + match = true + case z == x: + continue + } + } + n.checks[k] = c + k++ + } + n.checks = n.checks[:k] + if !match { + n.checks = append(n.checks, x) + } + return + } + + case Validator: + // This check serves as simplifier, but also to remove duplicates. + for i, y := range n.checks { + if b := SimplifyValidator(ctx, x, y); b != nil { + n.checks[i] = b + return + } + } + n.updateNodeType(x.Kind(), x, id) + n.checks = append(n.checks, x) + + case *Vertex: + // handled above. + + case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin + if y := n.scalar; y != nil { + if b, ok := BinOp(ctx, EqualOp, x, y).(*Bool); !ok || !b.B { + n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id) + } + // TODO: do we need to explicitly add again? + // n.scalar = nil + // n.addValueConjunct(c, BinOp(c, EqualOp, x, y)) + break + } + n.scalar = x + n.scalarID = id + + default: + panic(fmt.Sprintf("unknown value type %T", x)) + } + + if n.lowerBound != nil && n.upperBound != nil { + if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil { + if err := valueError(u); err != nil { + err.AddPosition(n.lowerBound) + err.AddPosition(n.upperBound) + err.AddClosedPositions(id) + } + n.lowerBound = nil + n.upperBound = nil + n.addValueConjunct(env, u, id) + } + } +} + +func valueError(v Value) *ValueError { + if v == nil { + return nil + } + b, _ := v.(*Bottom) + if b == nil { + return nil + } + err, _ := b.Err.(*ValueError) + if err == nil { + return nil + } + return err +} + +// addStruct collates the declarations of a struct. 
+// +// addStruct fulfills two additional pivotal functions: +// 1) Implement vertex unification (this happens through De Bruijn indices +// combined with proper set up of Environments). +// 2) Implied closedness for definitions. +// +func (n *nodeContext) addStruct( + env *Environment, + s *StructLit, + closeInfo CloseInfo) { + + n.updateCyclicStatus(env) // to handle empty structs. + + // NOTE: This is a crucial point in the code: + // Unification derferencing happens here. The child nodes are set to + // an Environment linked to the current node. Together with the De Bruijn + // indices, this determines to which Vertex a reference resolves. + + // TODO(perf): consider using environment cache: + // var childEnv *Environment + // for _, s := range n.nodeCache.sub { + // if s.Up == env { + // childEnv = s + // } + // } + childEnv := &Environment{ + Up: env, + Vertex: n.node, + } + if env != nil { + childEnv.Cyclic = env.Cyclic + childEnv.Deref = env.Deref + } + + s.Init() + + if s.HasEmbed && !s.IsFile() { + closeInfo = closeInfo.SpawnGroup(nil) + } + + parent := n.node.AddStruct(s, childEnv, closeInfo) + closeInfo.IsClosed = false + parent.Disable = true // disable until processing is done. + + for _, d := range s.Decls { + switch x := d.(type) { + case *Field: + // handle in next iteration. + + case *DynamicField: + n.aStruct = s + n.aStructID = closeInfo + n.dynamicFields = append(n.dynamicFields, envDynamic{childEnv, x, closeInfo, nil}) + + case *Comprehension: + n.insertComprehension(childEnv, x, closeInfo) + + case Expr: + // add embedding to optional + + // TODO(perf): only do this if addExprConjunct below will result in + // a fieldSet. Otherwise the entry will just be removed next. + id := closeInfo.SpawnEmbed(x) + + // push and opo embedding type. + n.addExprConjunct(MakeConjunct(childEnv, x, id)) + + case *OptionalField, *BulkOptionalField, *Ellipsis: + // Nothing to do here. 
Note that the precense of these fields do not + // excluded embedded scalars: only when they match actual fields + // does it exclude those. + + default: + panic("unreachable") + } + } + + if !s.HasEmbed { + n.aStruct = s + n.aStructID = closeInfo + } + + parent.Disable = false + + for _, d := range s.Decls { + switch x := d.(type) { + case *Field: + if x.Label.IsString() { + n.aStruct = s + n.aStructID = closeInfo + } + n.insertField(x.Label, MakeConjunct(childEnv, x, closeInfo)) + } + } +} + +// TODO(perf): if an arc is the only arc with that label added to a Vertex, and +// if there are no conjuncts of optional fields to be added, then the arc could +// be added as is until any of these conditions change. This would allow +// structure sharing in many cases. One should be careful, however, to +// recursively track arcs of previously unified evaluated vertices ot make this +// optimization meaningful. +// +// An alternative approach to avoid evaluating optional arcs (if we take that +// route) is to not recursively evaluate those arcs, even for Finalize. This is +// possible as it is not necessary to evaluate optional arcs to evaluate +// disjunctions. +func (n *nodeContext) insertField(f Feature, x Conjunct) *Vertex { + ctx := n.ctx + arc, _ := n.node.GetArc(ctx, f) + + arc.addConjunct(x) + + switch { + case arc.state != nil: + s := arc.state + switch { + case arc.Status() <= AllArcs: + // This may happen when a struct has multiple comprehensions, where + // the insertion of one of which depends on the outcome of another. + + // TODO: to something more principled by allowing values to + // monotonically increase. 
+ arc.status = Partial + arc.BaseValue = nil + s.disjuncts = s.disjuncts[:0] + s.disjunctErrs = s.disjunctErrs[:0] + + fallthrough + + default: + arc.state.addExprConjunct(x) + } + + case arc.Status() == 0: + default: + n.addBottom(&Bottom{ + Code: IncompleteError, + Err: ctx.NewPosf(pos(x.Field()), + "cannot add field %s: was already used", + f.SelectorString(ctx)), + }) + } + return arc +} + +// expandOne adds dynamic fields to a node until a fixed point is reached. +// On each iteration, dynamic fields that cannot resolve due to incomplete +// values are skipped. They will be retried on the next iteration until no +// progress can be made. Note that a dynamic field may add more dynamic fields. +// +// forClauses are processed after all other clauses. A struct may be referenced +// before it is complete, meaning that fields added by other forms of injection +// may influence the result of a for clause _after_ it has already been +// processed. We could instead detect such insertion and feed it to the +// ForClause to generate another entry or have the for clause be recomputed. +// This seems to be too complicated and lead to iffy edge cases. +// TODO(errors): detect when a field is added to a struct that is already used +// in a for clause. +func (n *nodeContext) expandOne() (done bool) { + // Don't expand incomplete expressions if we detected a cycle. + if n.done() || (n.hasCycle && !n.hasNonCycle) { + return false + } + + var progress bool + + if progress = n.injectDynamic(); progress { + return true + } + + if progress = n.injectComprehensions(&(n.comprehensions)); progress { + return true + } + + // Do expressions after comprehensions, as comprehensions can never + // refer to embedded scalars, whereas expressions may refer to generated + // fields if we were to allow attributes to be defined alongside + // scalars. 
+ exprs := n.exprs + n.exprs = n.exprs[:0] + for _, x := range exprs { + n.addExprConjunct(x.c) + + // collect and and or + } + if len(n.exprs) < len(exprs) { + return true + } + + // No progress, report error later if needed: unification with + // disjuncts may resolve this later later on. + return false +} + +// injectDynamic evaluates and inserts dynamic declarations. +func (n *nodeContext) injectDynamic() (progress bool) { + ctx := n.ctx + k := 0 + + a := n.dynamicFields + for _, d := range n.dynamicFields { + var f Feature + v, complete := ctx.Evaluate(d.env, d.field.Key) + if !complete { + d.err, _ = v.(*Bottom) + a[k] = d + k++ + continue + } + if b, _ := v.(*Bottom); b != nil { + n.addValueConjunct(nil, b, d.id) + continue + } + f = ctx.Label(d.field.Key, v) + if f.IsInt() { + n.addErr(ctx.NewPosf(pos(d.field.Key), "integer fields not supported")) + } + n.insertField(f, MakeConjunct(d.env, d.field, d.id)) + } + + progress = k < len(n.dynamicFields) + + n.dynamicFields = a[:k] + + return progress +} + +// addLists +// +// TODO: association arrays: +// If an association array marker was present in a struct, create a struct node +// instead of a list node. In either case, a node may only have list fields +// or struct fields and not both. +// +// addLists should be run after the fixpoint expansion: +// - it enforces that comprehensions may not refer to the list itself +// - there may be no other fields within the list. +// +// TODO(embeddedScalars): for embedded scalars, there should be another pass +// of evaluation expressions after expanding lists. 
+func (n *nodeContext) addLists() (oneOfTheLists Expr, anID CloseInfo) { + if len(n.lists) == 0 && len(n.vLists) == 0 { + return nil, CloseInfo{} + } + + isOpen := true + max := 0 + var maxNode Expr + + if m, ok := n.node.BaseValue.(*ListMarker); ok { + isOpen = m.IsOpen + max = len(n.node.Arcs) + } + + c := n.ctx + + for _, l := range n.vLists { + oneOfTheLists = l + + elems := l.Elems() + isClosed := l.IsClosedList() + + switch { + case len(elems) < max: + if isClosed { + n.invalidListLength(len(elems), max, l, maxNode) + continue + } + + case len(elems) > max: + if !isOpen { + n.invalidListLength(max, len(elems), maxNode, l) + continue + } + isOpen = !isClosed + max = len(elems) + maxNode = l + + case isClosed: + isOpen = false + maxNode = l + } + + for _, a := range elems { + if a.Conjuncts == nil { + x := a.BaseValue.(Value) + n.insertField(a.Label, MakeConjunct(nil, x, CloseInfo{})) + continue + } + for _, c := range a.Conjuncts { + n.insertField(a.Label, c) + } + } + } + +outer: + for i, l := range n.lists { + n.updateCyclicStatus(l.env.Up) + + index := int64(0) + hasComprehension := false + for j, elem := range l.list.Elems { + switch x := elem.(type) { + case *Comprehension: + err := c.Yield(l.env, x, func(e *Environment) { + label, err := MakeLabel(x.Source(), index, IntLabel) + n.addErr(err) + index++ + c := MakeConjunct(e, x.Value, l.id) + n.insertField(label, c) + }) + hasComprehension = true + if err != nil { + n.addBottom(err) + continue outer + } + + case *Ellipsis: + if j != len(l.list.Elems)-1 { + n.addErr(c.Newf("ellipsis must be last element in list")) + } + + n.lists[i].elipsis = x + + default: + label, err := MakeLabel(x.Source(), index, IntLabel) + n.addErr(err) + index++ // TODO: don't use insertField. + n.insertField(label, MakeConjunct(l.env, x, l.id)) + } + + // Terminate early in case of runaway comprehension. 
+ if !isOpen && int(index) > max { + n.invalidListLength(max, len(l.list.Elems), maxNode, l.list) + continue outer + } + } + + oneOfTheLists = l.list + anID = l.id + + switch closed := n.lists[i].elipsis == nil; { + case int(index) < max: + if closed { + n.invalidListLength(int(index), max, l.list, maxNode) + continue + } + + case int(index) > max, + closed && isOpen, + (!closed == isOpen) && !hasComprehension: + max = int(index) + maxNode = l.list + isOpen = !closed + } + + n.lists[i].n = index + } + + // add additionalItem values to list and construct optionals. + elems := n.node.Elems() + for _, l := range n.vLists { + if !l.IsClosedList() { + continue + } + + newElems := l.Elems() + if len(newElems) >= len(elems) { + continue // error generated earlier, if applicable. + } + + for _, arc := range elems[len(newElems):] { + l.MatchAndInsert(c, arc) + } + } + + for _, l := range n.lists { + if l.elipsis == nil { + continue + } + + s := l.list.info + if s == nil { + s = &StructLit{Decls: []Decl{l.elipsis}} + s.Init() + l.list.info = s + } + info := n.node.AddStruct(s, l.env, l.id) + + for _, arc := range elems[l.n:] { + info.MatchAndInsert(c, arc) + } + } + + sources := []ast.Expr{} + // Add conjuncts for additional items. + for _, l := range n.lists { + if l.elipsis == nil { + continue + } + if src, _ := l.elipsis.Source().(ast.Expr); src != nil { + sources = append(sources, src) + } + } + + if m, ok := n.node.BaseValue.(*ListMarker); !ok { + n.node.SetValue(c, Partial, &ListMarker{ + Src: ast.NewBinExpr(token.AND, sources...), + IsOpen: isOpen, + }) + } else { + if expr, _ := m.Src.(ast.Expr); expr != nil { + sources = append(sources, expr) + } + m.Src = ast.NewBinExpr(token.AND, sources...) 
+ m.IsOpen = m.IsOpen && isOpen + } + + n.lists = n.lists[:0] + n.vLists = n.vLists[:0] + + return oneOfTheLists, anID +} + +func (n *nodeContext) invalidListLength(na, nb int, a, b Expr) { + n.addErr(n.ctx.Newf("incompatible list lengths (%d and %d)", na, nb)) +} diff --git a/vendor/cuelang.org/go/internal/core/adt/expr.go b/vendor/cuelang.org/go/internal/core/adt/expr.go new file mode 100644 index 0000000000..31aeda234f --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/expr.go @@ -0,0 +1,1759 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "bytes" + "fmt" + "io" + "regexp" + + "github.com/cockroachdb/apd/v2" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// A StructLit represents an unevaluated struct literal or file body. +type StructLit struct { + Src ast.Node // ast.File or ast.StructLit + Decls []Decl + + // TODO: record the merge order somewhere. + + // The below fields are redundant to Decls and are computed with Init. + + // field marks the optional conjuncts of all explicit Fields. + // Required Fields are marked as empty + Fields []FieldInfo + + Dynamic []*DynamicField + + // excluded are all literal fields that already exist. + Bulk []*BulkOptionalField + + Additional []*Ellipsis + HasEmbed bool + IsOpen bool // has a ... + initialized bool + + types OptionalType + + // administrative fields like hasreferences. 
+ // hasReferences bool +} + +func (o *StructLit) IsFile() bool { + _, ok := o.Src.(*ast.File) + return ok +} + +type FieldInfo struct { + Label Feature + Optional []Node +} + +func (x *StructLit) HasOptional() bool { + return x.types&(HasField|HasPattern|HasAdditional) != 0 +} + +func (x *StructLit) Source() ast.Node { return x.Src } + +func (x *StructLit) evaluate(c *OpContext) Value { + e := c.Env(0) + v := &Vertex{ + Parent: e.Vertex, + Conjuncts: []Conjunct{{e, x, CloseInfo{}}}, + } + // evaluate may not finalize a field, as the resulting value may be + // used in a context where more conjuncts are added. It may also lead + // to disjuncts being in a partially expanded state, leading to + // misaligned nodeContexts. + c.Unify(v, AllArcs) + return v +} + +// TODO: remove this method +func (o *StructLit) MarkField(f Feature) { + o.Fields = append(o.Fields, FieldInfo{Label: f}) +} + +func (o *StructLit) Init() { + if o.initialized { + return + } + o.initialized = true + for _, d := range o.Decls { + switch x := d.(type) { + case *Field: + if o.fieldIndex(x.Label) < 0 { + o.Fields = append(o.Fields, FieldInfo{Label: x.Label}) + } + + case *OptionalField: + p := o.fieldIndex(x.Label) + if p < 0 { + p = len(o.Fields) + o.Fields = append(o.Fields, FieldInfo{Label: x.Label}) + } + o.Fields[p].Optional = append(o.Fields[p].Optional, x) + o.types |= HasField + + case *DynamicField: + o.Dynamic = append(o.Dynamic, x) + o.types |= HasDynamic + + case Expr: + o.HasEmbed = true + + case *Comprehension: + o.HasEmbed = true + + case *LetClause: + o.HasEmbed = true + + case *BulkOptionalField: + o.Bulk = append(o.Bulk, x) + o.types |= HasPattern + switch x.Filter.(type) { + case *BasicType, *Top: + default: + o.types |= HasComplexPattern + } + + case *Ellipsis: + switch x.Value.(type) { + case nil, *Top: + o.IsOpen = true + o.types |= IsOpen + + default: + // TODO: consider only adding for non-top. 
+ o.types |= HasAdditional + } + o.Additional = append(o.Additional, x) + + default: + panic("unreachable") + } + } +} + +func (o *StructLit) fieldIndex(f Feature) int { + for i := range o.Fields { + if o.Fields[i].Label == f { + return i + } + } + return -1 +} + +func (o *StructLit) OptionalTypes() OptionalType { + return o.types +} + +func (o *StructLit) IsOptional(label Feature) bool { + for _, f := range o.Fields { + if f.Label == label && len(f.Optional) > 0 { + return true + } + } + return false +} + +// FIELDS +// +// Fields can also be used as expressions whereby the value field is the +// expression this allows retaining more context. + +// Field represents a field with a fixed label. It can be a regular field, +// definition or hidden field. +// +// foo: bar +// #foo: bar +// _foo: bar +// +// Legacy: +// +// Foo :: bar +// +type Field struct { + Src *ast.Field + + Label Feature + Value Expr +} + +func (x *Field) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +// An OptionalField represents an optional regular field. +// +// foo?: expr +// +type OptionalField struct { + Src *ast.Field + Label Feature + Value Expr +} + +func (x *OptionalField) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +// A BulkOptionalField represents a set of optional field. +// +// [expr]: expr +// +type BulkOptionalField struct { + Src *ast.Field // Elipsis or Field + Filter Expr + Value Expr + Label Feature // for reference and formatting +} + +func (x *BulkOptionalField) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +// A Ellipsis represents a set of optional fields of a given type. +// +// ...T +// +type Ellipsis struct { + Src *ast.Ellipsis + Value Expr +} + +func (x *Ellipsis) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +// A DynamicField represents a regular field for which the key is computed. 
+// +// "\(expr)": expr +// (expr): expr +// +type DynamicField struct { + Src *ast.Field + Key Expr + Value Expr +} + +func (x *DynamicField) IsOptional() bool { + return x.Src.Optional != token.NoPos +} + +func (x *DynamicField) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +// A ListLit represents an unevaluated list literal. +// +// [a, for x in src { ... }, b, ...T] +// +type ListLit struct { + Src *ast.ListLit + + // scalars, comprehensions, ...T + Elems []Elem + + info *StructLit // Shared closedness info. +} + +func (x *ListLit) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *ListLit) evaluate(c *OpContext) Value { + e := c.Env(0) + v := &Vertex{ + Parent: e.Vertex, + Conjuncts: []Conjunct{{e, x, CloseInfo{}}}, + } + // TODO: should be AllArcs and then use Finalize for builtins? + c.Unify(v, Finalized) // TODO: also partial okay? + return v +} + +// Null represents null. It can be used as a Value and Expr. +type Null struct { + Src ast.Node +} + +func (x *Null) Source() ast.Node { return x.Src } +func (x *Null) Kind() Kind { return NullKind } + +// Bool is a boolean value. It can be used as a Value and Expr. +type Bool struct { + Src ast.Node + B bool +} + +func (x *Bool) Source() ast.Node { return x.Src } +func (x *Bool) Kind() Kind { return BoolKind } + +// Num is a numeric value. It can be used as a Value and Expr. +type Num struct { + Src ast.Node + K Kind // needed? + X apd.Decimal // Is integer if the apd.Decimal is an integer. +} + +// TODO: do we need this? 
+// func NewNumFromString(src ast.Node, s string) Value { +// n := &Num{Src: src, K: IntKind} +// if strings.ContainsAny(s, "eE.") { +// n.K = FloatKind +// } +// _, _, err := n.X.SetString(s) +// if err != nil { +// pos := token.NoPos +// if src != nil { +// pos = src.Pos() +// } +// return &Bottom{Err: errors.Newf(pos, "invalid number: %v", err)} +// } +// return n +// } + +func (x *Num) Source() ast.Node { return x.Src } +func (x *Num) Kind() Kind { return x.K } + +// TODO: do we still need this? +// func (x *Num) Specialize(k Kind) Value { +// k = k & x.K +// if k == x.K { +// return x +// } +// y := *x +// y.K = k +// return &y +// } + +// String is a string value. It can be used as a Value and Expr. +type String struct { + Src ast.Node + Str string + RE *regexp.Regexp // only set if needed +} + +func (x *String) Source() ast.Node { return x.Src } +func (x *String) Kind() Kind { return StringKind } + +// Bytes is a bytes value. It can be used as a Value and Expr. +type Bytes struct { + Src ast.Node + B []byte + RE *regexp.Regexp // only set if needed +} + +func (x *Bytes) Source() ast.Node { return x.Src } +func (x *Bytes) Kind() Kind { return BytesKind } + +// Composites: the evaluated fields of a composite are recorded in the arc +// vertices. + +type ListMarker struct { + Src ast.Node + IsOpen bool +} + +func (x *ListMarker) Source() ast.Node { return x.Src } +func (x *ListMarker) Kind() Kind { return ListKind } +func (x *ListMarker) node() {} + +type StructMarker struct { + // NeedClose is used to signal that the evaluator should close this struct. + // It is only set by the close builtin. + NeedClose bool +} + +func (x *StructMarker) Source() ast.Node { return nil } +func (x *StructMarker) Kind() Kind { return StructKind } +func (x *StructMarker) node() {} + +// Top represents all possible values. It can be used as a Value and Expr. 
+type Top struct{ Src *ast.Ident } + +func (x *Top) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} +func (x *Top) Kind() Kind { return TopKind } + +// BasicType represents all values of a certain Kind. It can be used as a Value +// and Expr. +// +// string +// int +// num +// bool +// +type BasicType struct { + Src ast.Node + K Kind +} + +func (x *BasicType) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} +func (x *BasicType) Kind() Kind { return x.K } + +// TODO: do we still need this? +// func (x *BasicType) Specialize(k Kind) Value { +// k = x.K & k +// if k == x.K { +// return x +// } +// y := *x +// y.K = k +// return &y +// } + +// TODO: should we use UnaryExpr for Bound now we have BoundValue? + +// BoundExpr represents an unresolved unary comparator. +// +// Concrete { + return ctx.NewErrf("bound has fixed non-concrete value") + } + return &BoundValue{x.Src, x.Op, v} + } + + // This simplifies boundary expressions. It is an alternative to an + // evaluation strategy that makes nodes increasingly more specific. + // + // For instance, a completely different implementation would be to allow + // the precense of a concrete value to ignore incomplete errors. + // + // TODO: consider an alternative approach. + switch y := v.(type) { + case *BoundValue: + switch { + case y.Op == NotEqualOp: + switch x.Op { + case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp: + // <(!=3) => number + // Smaller than an arbitrarily large number is any number. + return &BasicType{K: y.Kind()} + case NotEqualOp: + // !=(!=3) ==> 3 + // Not a value that is anything but a given value is that + // given value. + return y.Value + } + + case x.Op == NotEqualOp: + // Invert if applicable. 
+ switch y.Op { + case LessEqualOp: + return &BoundValue{x.Src, GreaterThanOp, y.Value} + case LessThanOp: + return &BoundValue{x.Src, GreaterEqualOp, y.Value} + case GreaterEqualOp: + return &BoundValue{x.Src, LessThanOp, y.Value} + case GreaterThanOp: + return &BoundValue{x.Src, LessEqualOp, y.Value} + } + + case (x.Op == LessThanOp || x.Op == LessEqualOp) && + (y.Op == GreaterThanOp || y.Op == GreaterEqualOp), + (x.Op == GreaterThanOp || x.Op == GreaterEqualOp) && + (y.Op == LessThanOp || y.Op == LessEqualOp): + // <(>=3) + // Something smaller than an arbitrarily large number is any number. + return &BasicType{K: y.Kind()} + + case x.Op == LessThanOp && + (y.Op == LessEqualOp || y.Op == LessThanOp), + x.Op == GreaterThanOp && + (y.Op == GreaterEqualOp || y.Op == GreaterThanOp): + // <(<=x) => <=x + // Less or equal than something that is less than x is less than x. + return y + } + + case *BasicType: + switch x.Op { + case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp: + return y + } + } + if v.Concreteness() > Concrete { + // TODO(errors): analyze dependencies of x.Expr to get positions. + ctx.addErrf(IncompleteError, token.NoPos, // TODO(errors): use ctx.pos()? + "non-concrete value %s for bound %s", x.Expr, x.Op) + return nil + } + return &BoundValue{x.Src, x.Op, v} +} + +// A BoundValue is a fully evaluated unary comparator that can be used to +// validate other values. +// +// <5 +// =~"Name$" +// +type BoundValue struct { + Src ast.Expr + Op Op + Value Value +} + +func (x *BoundValue) Source() ast.Node { return x.Src } +func (x *BoundValue) Kind() Kind { + k := x.Value.Kind() + switch k { + case IntKind, FloatKind, NumKind: + return NumKind + + case NullKind: + if x.Op == NotEqualOp { + return TopKind &^ NullKind + } + } + return k +} + +func (x *BoundValue) validate(c *OpContext, y Value) *Bottom { + a := y // Can be list or struct. 
+ b := c.scalar(x.Value) + if c.HasErr() { + return c.Err() + } + + switch v := BinOp(c, x.Op, a, b).(type) { + case *Bottom: + return v + + case *Bool: + if v.B { + return nil + } + // TODO(errors): use "invalid value %v (not an %s)" if x is a + // predeclared identifier such as `int`. + err := c.Newf("invalid value %v (out of bound %s)", y, x) + err.AddPosition(y) + return &Bottom{Src: c.src, Err: err, Code: EvalError} + + default: + panic(fmt.Sprintf("unsupported type %T", v)) + } +} + +func (x *BoundValue) validateStr(c *OpContext, a string) bool { + if str, ok := x.Value.(*String); ok { + b := str.Str + switch x.Op { + case LessEqualOp: + return a <= b + case LessThanOp: + return a < b + case GreaterEqualOp: + return a >= b + case GreaterThanOp: + return a > b + case EqualOp: + return a == b + case NotEqualOp: + return a != b + case MatchOp: + return c.regexp(x.Value).MatchString(a) + case NotMatchOp: + return !c.regexp(x.Value).MatchString(a) + } + } + return x.validate(c, &String{Str: a}) == nil +} + +func (x *BoundValue) validateInt(c *OpContext, a int64) bool { + switch n := x.Value.(type) { + case *Num: + b, err := n.X.Int64() + if err != nil { + break + } + switch x.Op { + case LessEqualOp: + return a <= b + case LessThanOp: + return a < b + case GreaterEqualOp: + return a >= b + case GreaterThanOp: + return a > b + case EqualOp: + return a == b + case NotEqualOp: + return a != b + } + } + return x.validate(c, c.NewInt64(a)) == nil +} + +// A NodeLink is used during computation to refer to an existing Vertex. +// It is used to signal a potential cycle or reference. +// Note that a NodeLink may be used as a value. This should be taken into +// account. 
+type NodeLink struct { + Node *Vertex +} + +func (x *NodeLink) Kind() Kind { + return x.Node.Kind() +} +func (x *NodeLink) Source() ast.Node { return x.Node.Source() } + +func (x *NodeLink) resolve(c *OpContext, state VertexStatus) *Vertex { + return x.Node +} + +// A FieldReference represents a lexical reference to a field. +// +// a +// +type FieldReference struct { + Src *ast.Ident + UpCount int32 + Label Feature +} + +func (x *FieldReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *FieldReference) resolve(c *OpContext, state VertexStatus) *Vertex { + n := c.relNode(x.UpCount) + pos := pos(x) + return c.lookup(n, pos, x.Label, state) +} + +// A ValueReference represents a lexical reference to a value. +// +// a: X=b +// +type ValueReference struct { + Src *ast.Ident + UpCount int32 + Label Feature // for informative purposes +} + +func (x *ValueReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *ValueReference) resolve(c *OpContext, state VertexStatus) *Vertex { + if x.UpCount == 0 { + return c.vertex + } + n := c.relNode(x.UpCount - 1) + return n +} + +// A LabelReference refers to the string or integer value of a label. +// +// [X=Pattern]: b: X +// +type LabelReference struct { + Src *ast.Ident + UpCount int32 +} + +// TODO: should this implement resolver at all? + +func (x *LabelReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *LabelReference) evaluate(ctx *OpContext) Value { + label := ctx.relLabel(x.UpCount) + if label == 0 { + // There is no label. This may happen if a LabelReference is evaluated + // outside of the context of a parent node, for instance if an + // "additional" items or properties is evaluated in isolation. + // + // TODO: this should return the pattern of the label. 
+ return &BasicType{K: StringKind} + } + return label.ToValue(ctx) +} + +// A DynamicReference is like a LabelReference, but with a computed label. +// +// X=(x): X +// X="\(x)": X +// +type DynamicReference struct { + Src *ast.Ident + UpCount int32 + Label Expr + + // TODO: only use aliases and store the actual expression only in the scope. + // The feature is unique for every instance. This will also allow dynamic + // fields to be ordered among normal fields. + // + // This could also be used to assign labels to embedded values, if they + // don't match a label. + Alias Feature +} + +func (x *DynamicReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *DynamicReference) resolve(ctx *OpContext, state VertexStatus) *Vertex { + e := ctx.Env(x.UpCount) + frame := ctx.PushState(e, x.Src) + v := ctx.value(x.Label) + ctx.PopState(frame) + f := ctx.Label(x.Label, v) + return ctx.lookup(e.Vertex, pos(x), f, state) +} + +// An ImportReference refers to an imported package. +// +// import "strings" +// +// strings.ToLower("Upper") +// +type ImportReference struct { + Src *ast.Ident + ImportPath Feature + Label Feature // for informative purposes +} + +func (x *ImportReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *ImportReference) resolve(ctx *OpContext, state VertexStatus) *Vertex { + path := x.ImportPath.StringValue(ctx) + v := ctx.Runtime.LoadImport(path) + if v == nil { + ctx.addErrf(EvalError, x.Src.Pos(), "cannot find package %q", path) + } + return v +} + +// A LetReference evaluates a let expression in its original environment. 
+// +// let X = x +// +type LetReference struct { + Src *ast.Ident + UpCount int32 + Label Feature // for informative purposes + X Expr +} + +func (x *LetReference) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *LetReference) resolve(c *OpContext, state VertexStatus) *Vertex { + e := c.Env(x.UpCount) + label := e.Vertex.Label + if x.X == nil { + panic("nil expression") + } + // Anonymous arc. + return &Vertex{Parent: nil, Label: label, Conjuncts: []Conjunct{{e, x.X, CloseInfo{}}}} +} + +func (x *LetReference) evaluate(c *OpContext) Value { + e := c.Env(x.UpCount) + + // Not caching let expressions may lead to exponential behavior. + return e.evalCached(c, x.X) +} + +// A SelectorExpr looks up a fixed field in an expression. +// +// X.Sel +// +type SelectorExpr struct { + Src *ast.SelectorExpr + X Expr + Sel Feature +} + +func (x *SelectorExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *SelectorExpr) resolve(c *OpContext, state VertexStatus) *Vertex { + // TODO: the node should really be evaluated as AllArcs, but the order + // of evaluation is slightly off, causing too much to be evaluated. + // This may especially result in incorrect results when using embedded + // scalars. + n := c.node(x, x.X, x.Sel.IsRegular(), Partial) + if n == emptyNode { + return n + } + if n.status == Partial { + if b := n.state.incompleteErrors(); b != nil && b.Code < CycleError { + n.BaseValue = b + return n + } + } + return c.lookup(n, x.Src.Sel.Pos(), x.Sel, state) +} + +// IndexExpr is like a selector, but selects an index. +// +// X[Index] +// +type IndexExpr struct { + Src *ast.IndexExpr + X Expr + Index Expr +} + +func (x *IndexExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *IndexExpr) resolve(ctx *OpContext, state VertexStatus) *Vertex { + // TODO: support byte index. 
+ // TODO: the node should really be evaluated as AllArcs, but the order + // of evaluation is slightly off, causing too much to be evaluated. + // This may especially result in incorrect results when using embedded + // scalars. + n := ctx.node(x, x.X, true, Partial) + i := ctx.value(x.Index) + if n == emptyNode { + return n + } + if n.status == Partial { + if b := n.state.incompleteErrors(); b != nil && b.Code < CycleError { + n.BaseValue = b + return n + } + } + f := ctx.Label(x.Index, i) + return ctx.lookup(n, x.Src.Index.Pos(), f, state) +} + +// A SliceExpr represents a slice operation. (Not currently in spec.) +// +// X[Lo:Hi:Stride] +// +type SliceExpr struct { + Src *ast.SliceExpr + X Expr + Lo Expr + Hi Expr + Stride Expr +} + +func (x *SliceExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *SliceExpr) evaluate(c *OpContext) Value { + // TODO: strides + + v := c.value(x.X) + const as = "slice index" + + switch v := v.(type) { + case nil: + c.addErrf(IncompleteError, c.pos(), "non-concrete slice subject %s", x.X) + return nil + case *Vertex: + if !v.IsList() { + break + } + + var ( + lo = uint64(0) + hi = uint64(len(v.Arcs)) + ) + if x.Lo != nil { + lo = c.uint64(c.value(x.Lo), as) + } + if x.Hi != nil { + hi = c.uint64(c.value(x.Hi), as) + if hi > uint64(len(v.Arcs)) { + return c.NewErrf("index %d out of range", hi) + } + } + if lo > hi { + return c.NewErrf("invalid slice index: %d > %d", lo, hi) + } + + n := c.newList(c.src, v.Parent) + for i, a := range v.Arcs[lo:hi] { + label, err := MakeLabel(a.Source(), int64(i), IntLabel) + if err != nil { + c.AddBottom(&Bottom{Src: a.Source(), Err: err}) + return nil + } + arc := *a + arc.Parent = n + arc.Label = label + n.Arcs = append(n.Arcs, &arc) + } + n.status = Finalized + return n + + case *Bytes: + var ( + lo = uint64(0) + hi = uint64(len(v.B)) + ) + if x.Lo != nil { + lo = c.uint64(c.value(x.Lo), as) + } + if x.Hi != nil { + hi = c.uint64(c.value(x.Hi), as) + if hi 
> uint64(len(v.B)) { + return c.NewErrf("index %d out of range", hi) + } + } + if lo > hi { + return c.NewErrf("invalid slice index: %d > %d", lo, hi) + } + return c.newBytes(v.B[lo:hi]) + } + + if isError(v) { + return v + } + return c.NewErrf("cannot slice %v (type %s)", v, v.Kind()) +} + +// An Interpolation is a string interpolation. +// +// "a \(b) c" +// +type Interpolation struct { + Src *ast.Interpolation + K Kind // string or bytes + Parts []Expr // odd: strings, even sources +} + +func (x *Interpolation) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *Interpolation) evaluate(c *OpContext) Value { + buf := bytes.Buffer{} + for _, e := range x.Parts { + v := c.value(e) + if x.K == BytesKind { + buf.Write(c.ToBytes(v)) + } else { + buf.WriteString(c.ToString(v)) + } + } + if err := c.Err(); err != nil { + err = &Bottom{ + Code: err.Code, + Err: errors.Wrapf(err.Err, pos(x), "invalid interpolation"), + } + // c.AddBottom(err) + // return nil + return err + } + if x.K == BytesKind { + return &Bytes{x.Src, buf.Bytes(), nil} + } + return &String{x.Src, buf.String(), nil} +} + +// UnaryExpr is a unary expression. +// +// Op X +// -X !X +X +// +type UnaryExpr struct { + Src *ast.UnaryExpr + Op Op + X Expr +} + +func (x *UnaryExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *UnaryExpr) evaluate(c *OpContext) Value { + if !c.concreteIsPossible(x.Op, x.X) { + return nil + } + v := c.value(x.X) + if isError(v) { + return v + } + + op := x.Op + k := kind(v) + expectedKind := k + switch op { + case SubtractOp: + if v, ok := v.(*Num); ok { + f := *v + f.X.Neg(&v.X) + f.Src = x.Src + return &f + } + expectedKind = NumKind + + case AddOp: + if v, ok := v.(*Num); ok { + // TODO: wrap in thunk to save position of '+'? 
+ return v + } + expectedKind = NumKind + + case NotOp: + if v, ok := v.(*Bool); ok { + return &Bool{x.Src, !v.B} + } + expectedKind = BoolKind + } + if k&expectedKind != BottomKind { + c.addErrf(IncompleteError, pos(x.X), + "operand %s of '%s' not concrete (was %s)", x.X, op, k) + return nil + } + return c.NewErrf("invalid operation %s (%s %s)", x, op, k) +} + +// BinaryExpr is a binary expression. +// +// X + Y +// X & Y +// +type BinaryExpr struct { + Src *ast.BinaryExpr + Op Op + X Expr + Y Expr +} + +func (x *BinaryExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *BinaryExpr) evaluate(c *OpContext) Value { + env := c.Env(0) + if x.Op == AndOp { + // Anonymous Arc + v := &Vertex{Conjuncts: []Conjunct{{env, x, CloseInfo{}}}} + c.Unify(v, Finalized) + return v + } + + if !c.concreteIsPossible(x.Op, x.X) || !c.concreteIsPossible(x.Op, x.Y) { + return nil + } + + // TODO: allow comparing to a literal Bottom only. Find something more + // principled perhaps. One should especially take care that two values + // evaluating to Bottom don't evaluate to true. For now we check for + // Bottom here and require that one of the values be a Bottom literal. + if x.Op == EqualOp || x.Op == NotEqualOp { + if isLiteralBottom(x.X) { + return c.validate(env, x.Src, x.Y, x.Op) + } + if isLiteralBottom(x.Y) { + return c.validate(env, x.Src, x.X, x.Op) + } + } + + left, _ := c.Concrete(env, x.X, x.Op) + right, _ := c.Concrete(env, x.Y, x.Op) + + if err := CombineErrors(x.Src, left, right); err != nil { + return err + } + + if err := c.Err(); err != nil { + return err + } + + return BinOp(c, x.Op, left, right) +} + +func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op) (r Value) { + s := c.PushState(env, src) + if c.nonMonotonicLookupNest == 0 { + c.nonMonotonicGeneration++ + } + + var match bool + // NOTE: using Unwrap is maybe note entirely accurate, as it may discard + // a future error. 
However, if it does so, the error will at least be + // reported elsewhere. + switch b := c.value(x).(type) { + case nil: + case *Bottom: + if b.Code == CycleError { + c.PopState(s) + c.AddBottom(b) + return nil + } + match = op == EqualOp + // We have a nonmonotonic use of a failure. Referenced fields should + // not be added anymore. + c.nonMonotonicRejectNest++ + c.evalState(x, Partial) + c.nonMonotonicRejectNest-- + + default: + // TODO(cycle): if EqualOp: + // - ensure to pass special status to if clause or keep a track of "hot" + // paths. + // - evaluate hypothetical struct + // - walk over all fields and verify that fields are not contradicting + // previously marked fields. + // + switch { + case b.Concreteness() > Concrete: + // TODO: mimic comparison to bottom semantics. If it is a valid + // value, check for concreteness that this level only. This + // should ultimately be replaced with an exists and valid + // builtin. + match = op == EqualOp + default: + match = op != EqualOp + } + c.nonMonotonicLookupNest++ + c.evalState(x, Partial) + c.nonMonotonicLookupNest-- + } + + c.PopState(s) + return &Bool{src, match} +} + +// A CallExpr represents a call to a builtin. +// +// len(x) +// strings.ToLower(x) +// +type CallExpr struct { + Src *ast.CallExpr + Fun Expr + Args []Expr +} + +func (x *CallExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *CallExpr) evaluate(c *OpContext) Value { + fun := c.value(x.Fun) + var b *Builtin + switch f := fun.(type) { + case *Builtin: + b = f + + case *BuiltinValidator: + // We allow a validator that takes no arguments accept the validated + // value to be called with zero arguments. 
+ switch { + case f.Src != nil: + c.AddErrf("cannot call previously called validator %s", x.Fun) + + case f.Builtin.IsValidator(len(x.Args)): + v := *f + v.Src = x + return &v + + default: + b = f.Builtin + } + + default: + c.AddErrf("cannot call non-function %s (type %s)", x.Fun, kind(fun)) + return nil + } + args := []Value{} + for i, a := range x.Args { + expr := c.value(a) + switch v := expr.(type) { + case nil: + // There SHOULD be an error in the context. If not, we generate + // one. + c.Assertf(pos(x.Fun), c.HasErr(), + "argument %d to function %s is incomplete", i, x.Fun) + + case *Bottom: + // TODO(errors): consider adding an argument index for this errors. + // On the other hand, this error is really not related to the + // argument itself, so maybe it is good as it is. + c.AddBottom(v) + + default: + args = append(args, expr) + } + } + if c.HasErr() { + return nil + } + if b.IsValidator(len(args)) { + return &BuiltinValidator{x, b, args} + } + result := b.call(c, pos(x), args) + if result == nil { + return nil + } + return c.evalState(result, Partial) +} + +// A Builtin is a value representing a native function call. +type Builtin struct { + // TODO: make these values for better type checking. + Params []Param + Result Kind + Func func(c *OpContext, args []Value) Expr + + Package Feature + Name string +} + +type Param struct { + Name Feature // name of the argument; mostly for documentation + Value Value // Could become Value later, using disjunctions for defaults. +} + +// Kind returns the kind mask of this parameter. +func (p Param) Kind() Kind { + return p.Value.Kind() +} + +// Default reports the default value for this Param or nil if there is none. 
+func (p Param) Default() Value { + d, ok := p.Value.(*Disjunction) + if !ok || d.NumDefaults != 1 { + return nil + } + return d.Values[0] +} + +func (x *Builtin) WriteName(w io.Writer, c *OpContext) { + _, _ = fmt.Fprintf(w, "%s.%s", x.Package.StringValue(c), x.Name) +} + +// Kind here represents the case where Builtin is used as a Validator. +func (x *Builtin) Kind() Kind { + return FuncKind +} + +func (x *Builtin) BareValidator() *BuiltinValidator { + if len(x.Params) != 1 || + (x.Result != BoolKind && x.Result != BottomKind) { + return nil + } + return &BuiltinValidator{Builtin: x} +} + +// IsValidator reports whether b should be interpreted as a Validator for the +// given number of arguments. +func (b *Builtin) IsValidator(numArgs int) bool { + return numArgs == len(b.Params)-1 && + b.Result&^BoolKind == 0 && + b.Params[numArgs].Default() == nil +} + +func bottom(v Value) *Bottom { + if x, ok := v.(*Vertex); ok { + v = x.Value() + } + b, _ := v.(*Bottom) + return b +} + +func (x *Builtin) call(c *OpContext, p token.Pos, args []Value) Expr { + fun := x // right now always x. 
+ if len(args) > len(x.Params) { + c.addErrf(0, p, + "too many arguments in call to %s (have %d, want %d)", + fun, len(args), len(x.Params)) + return nil + } + for i := len(args); i < len(x.Params); i++ { + v := x.Params[i].Default() + if v == nil { + c.addErrf(0, p, + "not enough arguments in call to %s (have %d, want %d)", + fun, len(args), len(x.Params)) + return nil + } + args = append(args, v) + } + for i, a := range args { + if x.Params[i].Kind() == BottomKind { + continue + } + if b := bottom(a); b != nil { + return b + } + if k := kind(a); x.Params[i].Kind()&k == BottomKind { + code := EvalError + b, _ := args[i].(*Bottom) + if b != nil { + code = b.Code + } + c.addErrf(code, pos(a), + "cannot use %s (type %s) as %s in argument %d to %s", + a, k, x.Params[i].Kind(), i+1, fun) + return nil + } + v := x.Params[i].Value + if _, ok := v.(*BasicType); !ok { + env := c.Env(0) + x := &BinaryExpr{Op: AndOp, X: v, Y: a} + n := &Vertex{Conjuncts: []Conjunct{{env, x, CloseInfo{}}}} + c.Unify(n, Finalized) + if _, ok := n.BaseValue.(*Bottom); ok { + c.addErrf(0, pos(a), + "cannot use %s as %s in argument %d to %s", + a, v, i+1, fun) + return nil + } + args[i] = n + } + } + return x.Func(c, args) +} + +func (x *Builtin) Source() ast.Node { return nil } + +// A BuiltinValidator is a Value that results from evaluation a partial call +// to a builtin (using CallExpr). 
+// +// strings.MinRunes(4) +// +type BuiltinValidator struct { + Src *CallExpr + Builtin *Builtin + Args []Value // any but the first value +} + +func (x *BuiltinValidator) Source() ast.Node { + if x.Src == nil { + return x.Builtin.Source() + } + return x.Src.Source() +} + +func (x *BuiltinValidator) Pos() token.Pos { + if src := x.Source(); src != nil { + return src.Pos() + } + return token.NoPos +} + +func (x *BuiltinValidator) Kind() Kind { + return x.Builtin.Params[0].Kind() +} + +func (x *BuiltinValidator) validate(c *OpContext, v Value) *Bottom { + args := make([]Value, len(x.Args)+1) + args[0] = v + copy(args[1:], x.Args) + + return validateWithBuiltin(c, x.Pos(), x.Builtin, args) +} + +func validateWithBuiltin(c *OpContext, src token.Pos, b *Builtin, args []Value) *Bottom { + var severeness ErrorCode + var err errors.Error + + res := b.call(c, src, args) + switch v := res.(type) { + case nil: + return nil + + case *Bottom: + if v == nil { + return nil // caught elsewhere, but be defensive. + } + severeness = v.Code + err = v.Err + + case *Bool: + if v.B { + return nil + } + + default: + return c.NewErrf("invalid validator %s.%s", b.Package.StringValue(c), b.Name) + } + + // failed: + var buf bytes.Buffer + b.WriteName(&buf, c) + if len(args) > 1 { + buf.WriteString("(") + for i, a := range args[1:] { + if i > 0 { + _, _ = buf.WriteString(", ") + } + buf.WriteString(c.Str(a)) + } + buf.WriteString(")") + } + + vErr := c.NewPosf(src, "invalid value %s (does not satisfy %s)", args[0], buf.String()) + + for _, v := range args { + vErr.AddPosition(v) + } + + return &Bottom{Code: severeness, Err: errors.Wrap(vErr, err)} +} + +// A Disjunction represents a disjunction, where each disjunct may or may not +// be marked as a default. +type DisjunctionExpr struct { + Src *ast.BinaryExpr + Values []Disjunct + + HasDefaults bool +} + +// A Disjunct is used in Disjunction. 
+type Disjunct struct { + Val Expr + Default bool +} + +func (x *DisjunctionExpr) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *DisjunctionExpr) evaluate(c *OpContext) Value { + e := c.Env(0) + v := &Vertex{Conjuncts: []Conjunct{{e, x, CloseInfo{}}}} + c.Unify(v, Finalized) // TODO: also partial okay? + // TODO: if the disjunction result originated from a literal value, we may + // consider the result closed to create more permanent errors. + return v +} + +// A Conjunction is a conjunction of values that cannot be represented as a +// single value. It is the result of unification. +type Conjunction struct { + Src ast.Expr + Values []Value +} + +func (x *Conjunction) Source() ast.Node { return x.Src } +func (x *Conjunction) Kind() Kind { + k := TopKind + for _, v := range x.Values { + k &= v.Kind() + } + return k +} + +// A disjunction is a disjunction of values. It is the result of expanding +// a DisjunctionExpr if the expression cannot be represented as a single value. +type Disjunction struct { + Src ast.Expr + + // Values are the non-error disjuncts of this expression. The first + // NumDefault values are default values. + Values []*Vertex + + Errors *Bottom // []bottom + + // NumDefaults indicates the number of default values. + NumDefaults int + HasDefaults bool +} + +func (x *Disjunction) Source() ast.Node { return x.Src } +func (x *Disjunction) Kind() Kind { + k := BottomKind + for _, v := range x.Values { + k |= v.Kind() + } + return k +} + +type Comprehension struct { + Clauses Yielder + Value Expr +} + +func (x *Comprehension) Source() ast.Node { + if x.Clauses == nil { + return nil + } + return x.Clauses.Source() +} + +// A ForClause represents a for clause of a comprehension. It can be used +// as a struct or list element. 
+// +// for k, v in src {} +// +type ForClause struct { + Syntax *ast.ForClause + Key Feature + Value Feature + Src Expr + Dst Yielder +} + +func (x *ForClause) Source() ast.Node { + if x.Syntax == nil { + return nil + } + return x.Syntax +} + +func (x *ForClause) yield(c *OpContext, f YieldFunc) { + n := c.node(x, x.Src, true, Finalized) + for _, a := range n.Arcs { + if !a.Label.IsRegular() { + continue + } + + c.Unify(a, Partial) + + n := &Vertex{status: Finalized} + + if x.Value != InvalidLabel { + b := &Vertex{ + Label: x.Value, + BaseValue: a, + } + n.Arcs = append(n.Arcs, b) + } + + if x.Key != InvalidLabel { + v := &Vertex{Label: x.Key} + key := a.Label.ToValue(c) + v.AddConjunct(MakeRootConjunct(c.Env(0), key)) + v.SetValue(c, Finalized, key) + n.Arcs = append(n.Arcs, v) + } + + sub := c.spawn(n) + saved := c.PushState(sub, x.Dst.Source()) + x.Dst.yield(c, f) + if b := c.PopState(saved); b != nil { + c.AddBottom(b) + break + } + if c.HasErr() { + break + } + } +} + +// An IfClause represents an if clause of a comprehension. It can be used +// as a struct or list element. +// +// if cond {} +// +type IfClause struct { + Src *ast.IfClause + Condition Expr + Dst Yielder +} + +func (x *IfClause) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *IfClause) yield(ctx *OpContext, f YieldFunc) { + if ctx.BoolValue(ctx.value(x.Condition)) { + x.Dst.yield(ctx, f) + } +} + +// An LetClause represents a let clause in a comprehension. 
+// +// let x = y +// +type LetClause struct { + Src *ast.LetClause + Label Feature + Expr Expr + Dst Yielder +} + +func (x *LetClause) Source() ast.Node { + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *LetClause) yield(c *OpContext, f YieldFunc) { + n := &Vertex{Arcs: []*Vertex{ + {Label: x.Label, Conjuncts: []Conjunct{{c.Env(0), x.Expr, CloseInfo{}}}}, + }} + + sub := c.spawn(n) + saved := c.PushState(sub, x.Dst.Source()) + x.Dst.yield(c, f) + if b := c.PopState(saved); b != nil { + c.AddBottom(b) + } +} + +// A ValueClause represents the value part of a comprehension. +type ValueClause struct { + *StructLit +} + +func (x *ValueClause) Source() ast.Node { + if x.StructLit == nil { + return nil + } + if x.Src == nil { + return nil + } + return x.Src +} + +func (x *ValueClause) yield(op *OpContext, f YieldFunc) { + f(op.Env(0)) +} diff --git a/vendor/cuelang.org/go/internal/core/adt/feature.go b/vendor/cuelang.org/go/internal/core/adt/feature.go new file mode 100644 index 0000000000..26d6c9301f --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/feature.go @@ -0,0 +1,324 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adt + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// A Feature is an encoded form of a label which comprises a compact +// representation of an integer or string label as well as a label type. +type Feature uint32 + +// TODO: create labels such that list are sorted first (or last with index.) + +// InvalidLabel is an encoding of an erroneous label. +const ( + InvalidLabel Feature = 0 + + // MaxIndex indicates the maximum number of unique strings that are used for + // labeles within this CUE implementation. + MaxIndex = 1<<(32-indexShift) - 1 +) + +// These labels can be used for wildcard queries. +var ( + AnyDefinition Feature = makeLabel(MaxIndex, DefinitionLabel) + AnyHidden Feature = makeLabel(MaxIndex, HiddenLabel) + AnyString Feature = makeLabel(MaxIndex, StringLabel) + AnyIndex Feature = makeLabel(MaxIndex, IntLabel) +) + +// A StringIndexer coverts strings to and from an index that is unique for a +// given string. +type StringIndexer interface { + // ToIndex returns a unique positive index for s (0 < index < 2^28-1). + // + // For each pair of strings s and t it must return the same index if and + // only if s == t. + StringToIndex(s string) (index int64) + + // ToString returns a string s for index such that ToIndex(s) == index. + IndexToString(index int64) string +} + +// SelectorString reports the shortest string representation of f when used as a +// selector. +func (f Feature) SelectorString(index StringIndexer) string { + x := f.safeIndex() + switch f.Typ() { + case IntLabel: + return strconv.Itoa(int(x)) + case StringLabel: + s := index.IndexToString(x) + if ast.IsValidIdent(s) && !internal.IsDefOrHidden(s) { + return s + } + return literal.String.Quote(s) + default: + return f.IdentString(index) + } +} + +// IdentString reports the identifier of f. 
The result is undefined if f +// is not an identifier label. +func (f Feature) IdentString(index StringIndexer) string { + s := index.IndexToString(f.safeIndex()) + if f.IsHidden() { + if p := strings.IndexByte(s, '\x00'); p >= 0 { + s = s[:p] + } + } + return s +} + +// PkgID returns the package identifier, composed of the module and package +// name, associated with this identifier. It will return "" if this is not +// a hidden label. +func (f Feature) PkgID(index StringIndexer) string { + if !f.IsHidden() { + return "" + } + s := index.IndexToString(f.safeIndex()) + if p := strings.IndexByte(s, '\x00'); p >= 0 { + s = s[p+1:] + } + return s +} + +// StringValue reports the string value of f, which must be a string label. +func (f Feature) StringValue(index StringIndexer) string { + if !f.IsString() { + panic("not a string label") + } + x := f.safeIndex() + return index.IndexToString(x) +} + +// ToValue converts a label to a value, which will be a Num for integer labels +// and a String for string labels. It panics when f is not a regular label. +func (f Feature) ToValue(ctx *OpContext) Value { + if !f.IsRegular() { + panic("not a regular label") + } + // TODO: Handle special regular values: invalid and AnyRegular. + if f.IsInt() { + return ctx.NewInt64(int64(f.Index())) + } + x := f.safeIndex() + str := ctx.IndexToString(x) + return ctx.NewString(str) +} + +// StringLabel converts s to a string label. +func (c *OpContext) StringLabel(s string) Feature { + return labelFromValue(c, nil, &String{Str: s}) +} + +// MakeStringLabel creates a label for the given string. +func MakeStringLabel(r StringIndexer, s string) Feature { + i := r.StringToIndex(s) + + // TODO: set position if it exists. + f, err := MakeLabel(nil, i, StringLabel) + if err != nil { + panic("out of free string slots") + } + return f +} + +// MakeIdentLabel creates a label for the given identifier. 
+func MakeIdentLabel(r StringIndexer, s, pkgpath string) Feature { + t := StringLabel + switch { + case strings.HasPrefix(s, "_#"): + t = HiddenDefinitionLabel + s = fmt.Sprintf("%s\x00%s", s, pkgpath) + case strings.HasPrefix(s, "#"): + t = DefinitionLabel + case strings.HasPrefix(s, "_"): + s = fmt.Sprintf("%s\x00%s", s, pkgpath) + t = HiddenLabel + } + i := r.StringToIndex(s) + f, err := MakeLabel(nil, i, t) + if err != nil { + panic("out of free string slots") + } + return f +} + +const msgGround = "invalid non-ground value %s (must be concrete %s)" + +func labelFromValue(c *OpContext, src Expr, v Value) Feature { + var i int64 + var t FeatureType + if isError(v) { + return InvalidLabel + } + switch v.Kind() { + case IntKind, NumKind: + x, _ := Unwrap(v).(*Num) + if x == nil { + c.addErrf(IncompleteError, pos(v), msgGround, v, "int") + return InvalidLabel + } + t = IntLabel + var err error + i, err = x.X.Int64() + if err != nil || x.K != IntKind { + if src == nil { + src = v + } + c.AddErrf("invalid index %v: %v", src, err) + return InvalidLabel + } + if i < 0 { + switch src.(type) { + case nil, *Num, *UnaryExpr: + // If the value is a constant, we know it is always an error. + // UnaryExpr is an approximation for a constant value here. + c.AddErrf("invalid index %s (index must be non-negative)", x) + default: + // Use a different message is it is the result of evaluation. + c.AddErrf("index %s out of range [%s]", src, x) + } + return InvalidLabel + } + + case StringKind: + x, _ := Unwrap(v).(*String) + if x == nil { + c.addErrf(IncompleteError, pos(v), msgGround, v, "string") + return InvalidLabel + } + t = StringLabel + i = c.StringToIndex(x.Str) + + default: + if src != nil { + c.AddErrf("invalid index %s (invalid type %v)", src, v.Kind()) + } else { + c.AddErrf("invalid index type %v", v.Kind()) + } + return InvalidLabel + } + + // TODO: set position if it exists. 
+ f, err := MakeLabel(nil, i, t) + if err != nil { + c.AddErr(err) + } + return f +} + +// MakeLabel creates a label. It reports an error if the index is out of range. +func MakeLabel(src ast.Node, index int64, f FeatureType) (Feature, errors.Error) { + if 0 > index || index > MaxIndex-1 { + p := token.NoPos + if src != nil { + p = src.Pos() + } + return InvalidLabel, + errors.Newf(p, "int label out of range (%d not >=0 and <= %d)", + index, MaxIndex-1) + } + return Feature(index)<