diff --git a/.ko.yaml b/.ko.yaml
index fe71c7d1ca..a50c850f1f 100644
--- a/.ko.yaml
+++ b/.ko.yaml
@@ -1,7 +1,30 @@
baseImageOverrides:
- github.com/google/ko: golang:1.19
+ github.com/google/ko: cgr.dev/chainguard/go
builds:
- id: ko
ldflags:
- "{{ .Env.LDFLAGS }}"
+
+verification:
+ # Override the default of "warn" to "deny"
+ noMatchPolicy: deny
+ policies:
+ # Expand the default base image policy (covers static) to include
+ # all Chainguard images (namely Go, see above).
+ - data: |
+ apiVersion: policy.sigstore.dev/v1beta1
+ kind: ClusterImagePolicy
+ metadata:
+ name: chainguard-images
+ spec:
+ images:
+ - glob: cgr.dev/chainguard/**
+ authorities:
+ - keyless:
+ url: https://fulcio.sigstore.dev
+ identities:
+ - issuer: https://token.actions.githubusercontent.com
+ subject: https://github.com/chainguard-images/images/.github/workflows/release.yaml@refs/heads/main
+ ctlog:
+ url: https://rekor.sigstore.dev
diff --git a/go.mod b/go.mod
index af4731a5ad..b633f32830 100644
--- a/go.mod
+++ b/go.mod
@@ -3,30 +3,38 @@ module github.com/google/ko
go 1.18
require (
- github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04
- github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08
+ github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64
+ github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206
github.com/containerd/stargz-snapshotter/estargz v0.13.0
github.com/docker/docker v20.10.22+incompatible
github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936
github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b
github.com/google/go-cmp v0.5.9
- github.com/google/go-containerregistry v0.12.1
+ github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757
github.com/opencontainers/image-spec v1.1.0-rc2
github.com/sigstore/cosign v1.13.1
+ github.com/sigstore/policy-controller v0.5.2
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.14.0
go.uber.org/automaxprocs v1.5.1
golang.org/x/sync v0.1.0
golang.org/x/tools v0.4.0
gopkg.in/yaml.v3 v3.0.1
+ k8s.io/api v0.26.0
k8s.io/apimachinery v0.26.0
+ knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7
sigs.k8s.io/kind v0.17.0
+ sigs.k8s.io/yaml v1.3.0
)
require (
- cloud.google.com/go/compute v1.12.1 // indirect
- cloud.google.com/go/compute/metadata v0.2.1 // indirect
- github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
+ cloud.google.com/go/compute v1.13.0 // indirect
+ cloud.google.com/go/compute/metadata v0.2.2 // indirect
+ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
+ contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
+ cuelang.org/go v0.4.3 // indirect
+ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
+ github.com/Azure/azure-sdk-for-go v67.1.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
@@ -37,31 +45,60 @@ require (
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.1.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
+ github.com/OneOfOne/xxhash v1.2.8 // indirect
+ github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
+ github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/alessio/shellescape v1.4.1 // indirect
+ github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
+ github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
+ github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
+ github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect
+ github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect
+ github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect
+ github.com/alibabacloud-go/openapi-util v0.0.11 // indirect
+ github.com/alibabacloud-go/tea v1.1.20 // indirect
+ github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
+ github.com/alibabacloud-go/tea-xml v1.1.2 // indirect
+ github.com/aliyun/credentials-go v1.2.4 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
- github.com/aws/aws-sdk-go-v2 v1.16.16 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.17.8 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect
- github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect
- github.com/aws/smithy-go v1.13.3 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.17.2 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.18.4 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect
+ github.com/aws/smithy-go v1.13.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver v3.5.1+incompatible // indirect
+ github.com/blendle/zapdriver v1.3.1 // indirect
+ github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/clbanning/mxj/v2 v2.5.6 // indirect
+ github.com/cockroachdb/apd/v2 v2.0.2 // indirect
+ github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+ github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
- github.com/docker/cli v20.10.20+incompatible // indirect
+ github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
+ github.com/go-chi/chi v4.1.2+incompatible // indirect
+ github.com/go-kit/log v0.2.1 // indirect
+ github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
@@ -73,54 +110,120 @@ require (
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.0 // indirect
+ github.com/go-playground/locales v0.14.0 // indirect
+ github.com/go-playground/universal-translator v0.18.0 // indirect
+ github.com/go-playground/validator/v10 v10.11.1 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/certificate-transparency-go v1.1.4 // indirect
+ github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757 // indirect
+ github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757 // indirect
+ github.com/google/go-github/v45 v45.2.0 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
+ github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 // indirect
+ github.com/google/uuid v1.3.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
+ github.com/in-toto/in-toto-golang v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
+ github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kelseyhightower/envconfig v1.4.0 // indirect
github.com/klauspost/compress v1.15.12 // indirect
- github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be // indirect
+ github.com/leodido/go-urn v1.2.1 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
+ github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/ulid v1.3.1 // indirect
+ github.com/open-policy-agent/opa v0.45.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
+ github.com/prometheus/client_golang v1.13.0 // indirect
+ github.com/prometheus/client_model v0.3.0 // indirect
+ github.com/prometheus/common v0.37.0 // indirect
+ github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/prometheus/statsd_exporter v0.22.8 // indirect
+ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2 // indirect
- github.com/sigstore/sigstore v1.4.4 // indirect
+ github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
+ github.com/shibumi/go-pathspec v1.3.0 // indirect
+ github.com/sigstore/rekor v1.0.1 // indirect
+ github.com/sigstore/sigstore v1.5.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
+ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+ github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+ github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 // indirect
+ github.com/thales-e-security/pool v0.0.2 // indirect
github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/tjfoc/gmsm v1.4.1 // indirect
+ github.com/transparency-dev/merkle v0.0.1 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
- go.mongodb.org/mongo-driver v1.10.2 // indirect
- golang.org/x/crypto v0.1.0 // indirect
+ github.com/xanzy/go-gitlab v0.73.1 // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+ github.com/yashtewari/glob-intersection v0.1.0 // indirect
+ go.mongodb.org/mongo-driver v1.10.3 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.uber.org/atomic v1.10.0 // indirect
+ go.uber.org/multierr v1.8.0 // indirect
+ go.uber.org/zap v1.24.0 // indirect
+ golang.org/x/crypto v0.4.0 // indirect
+ golang.org/x/exp v0.0.0-20221026153819-32f3d567a233 // indirect
golang.org/x/mod v0.7.0 // indirect
- golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
- golang.org/x/oauth2 v0.1.0 // indirect
+ golang.org/x/net v0.4.0 // indirect
+ golang.org/x/oauth2 v0.3.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+ google.golang.org/api v0.104.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
- google.golang.org/grpc v1.50.1 // indirect
+ google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
+ google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/klog/v2 v2.80.1 // indirect
- k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
+ k8s.io/client-go v0.25.4 // indirect
+ k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54 // indirect
+ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+ k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect
+ sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+ sigs.k8s.io/release-utils v0.7.3 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
diff --git a/go.sum b/go.sum
index 75cf4b3ee8..11bdab80ab 100644
--- a/go.sum
+++ b/go.sum
@@ -23,10 +23,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
-cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
-cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
-cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
+cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -39,9 +39,17 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI=
+contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
+contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
+contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
+cuelang.org/go v0.4.3 h1:W3oBBjDTm7+IZfCKZAmC8uDG0eYfJL4Pp/xbbCMKaVo=
+cuelang.org/go v0.4.3/go.mod h1:7805vR9H+VoBNdWFdI7jyDR3QLUPp4+naHfbcgp55HI=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
+github.com/Azure/azure-sdk-for-go v67.1.0+incompatible h1:oziYcaopbnIKfM69DL05wXdypiqfrUKdxUKrKpynJTw=
+github.com/Azure/azure-sdk-for-go v67.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -61,6 +69,8 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
@@ -72,89 +82,188 @@ github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
+github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts=
+github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0=
+github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c=
+github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc=
+github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU=
+github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI=
+github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY=
+github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY=
+github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA=
+github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 h1:NqugFkGxx1TXSh/pBcU00Y6bljgDPaFdh5MUSeJ7e50=
+github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY=
+github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8=
+github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.11 h1:iYnqOPR5hyEEnNZmebGyRMkkEJRWUEjDiiaOHZ5aNhA=
+github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg=
+github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.1.20 h1:wFK4xEbvGYMtzTyHhIju9D7ecWxvSUdoLO6y4vDLFik=
+github.com/alibabacloud-go/tea v1.1.20/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA=
+github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M=
+github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
+github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
+github.com/aliyun/credentials-go v1.2.4 h1:qu8c21BCvbaPJArEcsSk7GbSdxYFiACCjYzkEKCoeLA=
+github.com/aliyun/credentials-go v1.2.4/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0ywIy5lNzDz6O1vw=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
-github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
-github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
-github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
-github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA=
-github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE=
-github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA=
-github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 h1:W9vzPbvX7rOa/FacbQIDfnNrwxHkn5O+DdfmiIS4cHc=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5/go.mod h1:vk2+DbeZQFXznxJZSMnYrfnCHYxg4oT4Mdh59wSCkw4=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.4.1/go.mod h1:eD5Eo4drVP2FLTw0G+SMIPWNWvQRGGTtIZR2XeAagoA=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 h1:Y8dpvUxU4JecYktR5oNFEW+HmUWlA1Oh7mboTVyQWLg=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5/go.mod h1:gW979HGZOrhGvwjAS6VRgav6M9AYH9Kbey6y3GfF/EA=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
-github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA=
-github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 h1:p2I85zYI9z5/c/3Q0LiO3RtNXcmXHTtJfml/hV16zNg=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04/go.mod h1:Z+bXnIbhKJYSvxNwsNnwde7pDKxuqlEZCbUBoTwAqf0=
+github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
+github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8=
+github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2/config v1.17.10/go.mod h1:/4np+UiJJKpWHN7Q+LZvqXYgyjgeXm5+lLfDI6TPZao=
+github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE=
+github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.23/go.mod h1:0awX9iRr/+UO7OwRQFpV1hNtXxOVuehpjVEzrIAYNcA=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20 h1:nJnXfQggNZdrWz/0cm2ZGyddGK+FqTiN4QJGanzKZoY=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.17.20/go.mod h1:kEVGiy2tACP0cegVqx4MrjsgQMSgrtgRq1fSa+Ix6F0=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19 h1:AwWP9a5n9a6kcgpTOfZ2/AeHKdq1Cb+HwgWQ1ADqiZM=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.19/go.mod h1:j3mVo8gEwXjgzf9PfORBnYUUQnnjkd4OY6y5JmubV94=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E=
+github.com/aws/aws-sdk-go-v2/service/kms v1.19.2 h1:pgOVfu7E6zBddKGks4TvL4YuFsL/oTpiWDIzs4WPLjY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.1/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c=
+github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64 h1:J+6PUCOmCU9A2iZDGsTGxdycxybJMp+fbFEMWWsQUgg=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221027043306-dc425bc05c64/go.mod h1:oqbjAk8VeItfKctyahGuAyU61z4d0Fi1gHmlWjHWsMM=
+github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
+github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
+github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
+github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 h1:9Qh4lJ/KMr5iS1zfZ8I97+3MDpiKjl+0lZVUNBhdvRs=
-github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08/go.mod h1:MAuu1uDJNOS3T3ui0qmKdPUwm59+bO19BbTph2wZafE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206 h1:lG6Usi/kX/JBZzGz1H+nV+KwM97vThQeKunCbS6PutU=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206/go.mod h1:1UmFRnmMnVsHwD+ZntmLkoVBB1ZLa6V+XXEbF6hZCxU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
+github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g=
+github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E=
+github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI=
+github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/docker/cli v20.10.20+incompatible h1:lWQbHSHUFs7KraSN2jOJK7zbMS2jNCHI4mt4xUFUVQ4=
-github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU=
+github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.22+incompatible h1:6jX4yB+NtcbldT90k7vBSaWJDB3i+zkVJT9BEK8kQkk=
github.com/docker/docker v20.10.22+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -162,27 +271,55 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 h1:PRxIJD8XjimM5aTknUK9w6DHLDox2r2M3DI4i2pnd3w=
github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936/go.mod h1:ttYvX5qlB+mlV1okblJqcSMtR4c52UKxDiX9GRBS8+Q=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/proto v1.6.15 h1:XbpwxmuOPrdES97FrSfpyy67SSCV/wBIKXqgJzh6hNw=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897 h1:E52jfcE64UG42SwLmrW0QByONfGynWuzBvm86BoB9z8=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -222,9 +359,18 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+
github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y=
github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
+github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b h1:0pOrjn0UzTcHdhDVdxrH8LwM7QLnAp8qiUtwXM04JEE=
github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b/go.mod h1:hGGmX3bRUkYkc9aKA6mkUxi6d+f1GmZF1je0FlVTgwU=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -251,6 +397,11 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@@ -258,9 +409,13 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -286,8 +441,16 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY=
+github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ=
+github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -296,15 +459,26 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
-github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
+github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757 h1:pmegaRhUKXxTUrNhRz96PiBk4Ihfi6CoejIcFSWMTmg=
+github.com/google/go-containerregistry v0.12.2-0.20221114162634-781782aa2757/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757 h1:1qKTXnWK6DsOFFfjakWJKMlpfAwmykw6Jjk9SLBsZmI=
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221114162634-781782aa2757/go.mod h1:7QLaBZxN+nMCx82XO5R7qPHq0m61liEg8yca68zymHo=
+github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757 h1:FsE9anmDCfnvZBx/PxdW8JDVJrAtx8zkWkQyHoxA3Jc=
+github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221114162634-781782aa2757/go.mod h1:T6IXbpoY0IGBh0cyHZsIi/zmMBI5yInMr7ob1b+SCz0=
+github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI=
+github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -322,24 +496,68 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 h1:GFmzYtwUMi1S2mjLxfrJ/CZ9gWDG+zeLtZByg/QEBkk=
+github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9/go.mod h1:vywkS3p2SgNmPL7oAWqU5PiiknzRMp+ol3a19jfY2PQ=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM=
+github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM=
+github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
+github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
+github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8=
+github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU=
+github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -347,12 +565,25 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
+github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -360,15 +591,25 @@ github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kE
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be h1:Cx2bsfM27RBF/45zP1xhFN9FHDxo40LdYdE5L+GWVTw=
-github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be/go.mod h1:j/WMsOEcTSfy6VR1PkiIo20qH1V9iRRzb7ishoKkN0g=
+github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
+github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
+github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -378,47 +619,83 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
+github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -427,49 +704,115 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
+github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0=
+github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/protocolbuffers/txtpbfmt v0.0.0-20201118171849-f6a6b3f636fc h1:gSVONBi2HWMFXCa9jFdYvYk7IwW/mTLxWOF7rXS4LO0=
+github.com/qur/ar v0.0.0-20130629153254-282534b91770/go.mod h1:SjlYv2m9lpV0UW6K7lDqVJwEIIvSjaHbGk7nIfY8Hxw=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/sassoftware/go-rpmutils v0.1.1/go.mod h1:euhXULoBpvAxqrBHEyJS4Tsu3hHxUmQWNymxoJbzgUY=
+github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 h1:sUNzanSKA9z/h8xXl+ZJoxIYZL0Qx306MmxqRrvUgr0=
+github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74/go.mod h1:YlB8wFIZmFLZ1JllNBfSURzz52fBxbliNgYALk1UDmk=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/cosign v1.13.1 h1:+5oF8jisEcDw2TuXxCADC1u5//HfdnJhGbpv9Isiwu4=
github.com/sigstore/cosign v1.13.1/go.mod h1:PlfJODkovUOKsLrGI7Su57Ie/Eb/Ks7hRHw3tn5hQS4=
-github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2 h1:LD8LcwygdD2DxaINWwbkaUEBAknr205wmn66/N05s7c=
-github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2/go.mod h1:C/jZ3EZywl/Kew48fGMWQoh+1LxOMk0BkP3DHmtB+8M=
-github.com/sigstore/sigstore v1.4.4 h1:lVsnNTY8DUmy2hnwCPtimWfEqv+DIwleORkF8KyFsMs=
-github.com/sigstore/sigstore v1.4.4/go.mod h1:wIqu9sN72+pds31MMu89GchxXHy17k+VZWc+HY1ZXMA=
+github.com/sigstore/policy-controller v0.5.2 h1:0mJaYN71RDDpf1WoSEwXiuN6DOAFaC95ms3prHcVT3s=
+github.com/sigstore/policy-controller v0.5.2/go.mod h1:yJtROa8mbBAXAov6XM36B3IlP0MCA7x4Rgp4W99VjuA=
+github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c=
+github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g=
+github.com/sigstore/sigstore v1.5.0 h1:NqstQ6SwwhQsp6Ll0wgk/d9g5MlfmEppo14aquUjJ/8=
+github.com/sigstore/sigstore v1.5.0/go.mod h1:fRAaZ9xXh7ZQ0GJqZdpmNJ3pemuHBu2PgIAngmzIFSI=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -477,53 +820,107 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 h1:iGnD/q9160NWqKZZ5vY4p0dMiYMRknzctfSkqA4nBDw=
+github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613/go.mod h1:g6AnIpDSYMcphz193otpSIzN+11Rs+AAIIC6rm1enug=
+github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 h1:1i/Afw3rmaR1gF3sfVkG2X6ldkikQwA9zY380LrR5YI=
github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
+github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
+github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E=
+github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/xanzy/go-gitlab v0.73.1 h1:UMagqUZLJdjss1SovIC+kJCH4k2AZWXl58gJd38Y/hI=
+github.com/xanzy/go-gitlab v0.73.1/go.mod h1:d/a0vswScO7Agg1CZNz15Ic6SSvBG9vfw8egL99t4kA=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
+github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/zalando/go-keyring v0.1.0/go.mod h1:RaxNwUITJaHVdQ0VC7pELPZ3tOWn13nr0gZMZEhpVU0=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k=
-go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
+go.mongodb.org/mongo-driver v1.10.3 h1:XDQEvmh6z1EUsXuIkXE9TaVeqHw6SwS1uf93jFs0HBA=
+go.mongodb.org/mongo-driver v1.10.3/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@@ -531,8 +928,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
+golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -543,6 +940,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20221026153819-32f3d567a233 h1:9bNbSKT4RPLEzne0Xh1v3NaNecsa1DKjkOuTbY6V9rI=
+golang.org/x/exp v0.0.0-20221026153819-32f3d567a233/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -571,16 +970,21 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -596,18 +1000,25 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200930145003-4acb6c075d10/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
-golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -617,8 +1028,10 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
-golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
+golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -628,14 +1041,18 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -655,6 +1072,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -664,9 +1082,12 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -675,15 +1096,26 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -706,13 +1138,17 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -745,6 +1181,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -767,6 +1204,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
+gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -780,12 +1220,15 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg=
+google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -817,8 +1260,10 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -830,10 +1275,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
-google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc=
+google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -845,11 +1292,14 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -862,24 +1312,37 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -889,6 +1352,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
@@ -899,16 +1363,30 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
+k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
-k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs=
-k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8=
+k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw=
+k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54 h1:hWRbsoRWt44OEBnYUd4ceLy4ofBoh+p9vauWp/I5Gdg=
+k8s.io/klog/v2 v2.80.2-0.20221028030830-9ae4992afb54/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
+k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
+k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7 h1:YaO4KgF1Kp8BTi1hxMXDRnvsxCFq/wpotOD3jzrHmzw=
+knative.dev/pkg v0.0.0-20221221230956-4fd6eb8652b7/go.mod h1:IeUSNPPUpQnM35SjpnfCx0w5/V2RpEc+nmke6oPwpD0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM=
sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk=
+sigs.k8s.io/release-utils v0.7.3 h1:6pS8x6c5RmdUgR9qcg1LO6hjUzuE4Yo9TGZ3DemrZdM=
+sigs.k8s.io/release-utils v0.7.3/go.mod h1:n0mVez/1PZYZaZUTJmxewxH3RJ/Lf7JUDh7TG1CASOE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/pkg/commands/config.go b/pkg/commands/config.go
index f327aee974..e0256a8f91 100644
--- a/pkg/commands/config.go
+++ b/pkg/commands/config.go
@@ -77,6 +77,14 @@ func getBaseImage(bo *options.BuildOptions) build.GetBase {
if err != nil {
return nil, err
}
+
+ if bo.Verifier != nil {
+ base := ref.Context().Digest(desc.Digest.String())
+ if err := bo.Verifier.Verify(ctx, base, keychain); err != nil {
+ return nil, err
+ }
+ }
+
if desc.MediaType.IsIndex() {
return desc.ImageIndex()
}
diff --git a/pkg/commands/options/build.go b/pkg/commands/options/build.go
index 635958f8b4..da4ae2e7f0 100644
--- a/pkg/commands/options/build.go
+++ b/pkg/commands/options/build.go
@@ -17,8 +17,10 @@ limitations under the License.
package options
import (
+ "context"
"errors"
"fmt"
+ "log"
"os"
"path/filepath"
@@ -28,11 +30,32 @@ import (
"golang.org/x/tools/go/packages"
"github.com/google/ko/pkg/build"
+ "github.com/google/ko/pkg/policy"
)
const (
// configDefaultBaseImage is the default base image if not specified in .ko.yaml.
configDefaultBaseImage = "cgr.dev/chainguard/static:latest"
+
+ // configDefaultBaseImagePolicy is the default base image policy if not
+ // specified in .ko.yaml
+ configDefaultBaseImagePolicy = `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: ko-default-base-image-policy
+spec:
+ images:
+ - glob: cgr.dev/chainguard/static*
+ authorities:
+ - keyless:
+ url: https://fulcio.sigstore.dev
+ identities:
+ - issuer: https://token.actions.githubusercontent.com
+ subject: https://github.com/chainguard-images/images/.github/workflows/release.yaml@refs/heads/main
+ ctlog:
+ url: https://rekor.sigstore.dev
+`
)
// BuildOptions represents options for the ko builder.
@@ -71,6 +94,9 @@ type BuildOptions struct {
// BuildConfigs stores the per-image build config from `.ko.yaml`.
BuildConfigs map[string]build.Config
+
+ // Verifier is used to check that base images satisfy configured policies.
+ Verifier policy.Verifier
}
func AddBuildOptions(cmd *cobra.Command, bo *BuildOptions) {
@@ -160,7 +186,7 @@ func (bo *BuildOptions) LoadConfig() error {
if len(bo.BuildConfigs) == 0 {
var builds []build.Config
if err := v.UnmarshalKey("builds", &builds); err != nil {
- return fmt.Errorf("configuration section 'builds' cannot be parsed")
+ return fmt.Errorf("configuration section 'builds' cannot be parsed: %w", err)
}
buildConfigs, err := createBuildConfigMap(bo.WorkingDirectory, builds)
if err != nil {
@@ -169,9 +195,33 @@ func (bo *BuildOptions) LoadConfig() error {
bo.BuildConfigs = buildConfigs
}
+ vfy := policy.Verification{}
+ if err := v.UnmarshalKey("verification", &vfy); err != nil {
+ return fmt.Errorf("configuration section 'verification' cannot be parsed: %w", err)
+ }
+ verificationDefaults(&vfy)
+ vfr, err := policy.Compile(context.Background(), vfy, func(s string, i ...interface{}) {
+ log.Printf("WARNING: %s", fmt.Sprintf(s, i...))
+ })
+ if err != nil {
+ return fmt.Errorf("compiling verification: %w", err)
+ }
+ bo.Verifier = vfr
+
return nil
}
+func verificationDefaults(vfy *policy.Verification) {
+ if vfy.NoMatchPolicy == "" {
+ vfy.NoMatchPolicy = "warn"
+ }
+ if vfy.Policies == nil {
+ vfy.Policies = &[]policy.PolicyData{{
+ Data: configDefaultBaseImagePolicy,
+ }}
+ }
+}
+
func createBuildConfigMap(workingDirectory string, configs []build.Config) (map[string]build.Config, error) {
buildConfigsByImportPath := make(map[string]build.Config)
for i, config := range configs {
diff --git a/pkg/policy/parse.go b/pkg/policy/parse.go
new file mode 100644
index 0000000000..3b2133df9c
--- /dev/null
+++ b/pkg/policy/parse.go
@@ -0,0 +1,114 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/sigstore/policy-controller/pkg/apis/policy/v1alpha1"
+ "github.com/sigstore/policy-controller/pkg/apis/policy/v1beta1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ "sigs.k8s.io/yaml"
+)
+
+// Parse decodes a provided YAML document containing zero or more objects into
+// a collection of unstructured.Unstructured objects.
+func Parse(ctx context.Context, document string) ([]*unstructured.Unstructured, error) {
+ docs := strings.Split(document, "\n---\n")
+
+ objs := make([]*unstructured.Unstructured, 0, len(docs))
+ for i, doc := range docs {
+ doc = strings.TrimSpace(doc)
+ if doc == "" {
+ continue
+ }
+ var obj unstructured.Unstructured
+ if err := yaml.Unmarshal([]byte(doc), &obj); err != nil {
+ return nil, fmt.Errorf("decoding object[%d]: %w", i, err)
+ }
+ if obj.GetAPIVersion() == "" {
+ return nil, apis.ErrMissingField("apiVersion").ViaIndex(i)
+ }
+ if obj.GetName() == "" {
+ return nil, apis.ErrMissingField("metadata.name").ViaIndex(i)
+ }
+ objs = append(objs, &obj)
+ }
+ return objs, nil
+}
+
+// ParseClusterImagePolicies returns ClusterImagePolicy objects found in the
+// policy document.
+func ParseClusterImagePolicies(ctx context.Context, document string) (cips []*v1alpha1.ClusterImagePolicy, warns error, err error) {
+ if warns, err = Validate(ctx, document); err != nil {
+ return nil, warns, err
+ }
+
+ ol, err := Parse(ctx, document)
+ if err != nil {
+ return nil, warns, err
+ }
+
+ cips = make([]*v1alpha1.ClusterImagePolicy, 0)
+ for _, obj := range ol {
+ gv, err := schema.ParseGroupVersion(obj.GetAPIVersion())
+ if err != nil {
+ // Practically unstructured.Unstructured won't let this happen.
+			return nil, warns, fmt.Errorf("error parsing apiVersion %q: %w", obj.GetAPIVersion(), err)
+ }
+
+ cip := &v1alpha1.ClusterImagePolicy{}
+
+ switch gv.WithKind(obj.GetKind()).GroupKind() {
+ case v1beta1.SchemeGroupVersion.WithKind("ClusterImagePolicy").GroupKind():
+ v1b1 := &v1beta1.ClusterImagePolicy{}
+ if err := convert(obj, v1b1); err != nil {
+ return nil, warns, err
+ }
+ if err := cip.ConvertFrom(ctx, v1b1); err != nil {
+ return nil, warns, err
+ }
+
+ case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy").GroupKind():
+ // This is allowed, but we should convert things.
+ if err := convert(obj, cip); err != nil {
+ return nil, warns, err
+ }
+
+ default:
+ continue
+ }
+
+ cips = append(cips, cip)
+ }
+ return cips, warns, nil
+}
+
+func convert(from interface{}, to runtime.Object) error {
+ bs, err := json.Marshal(from)
+ if err != nil {
+ return fmt.Errorf("Marshal() = %w", err)
+ }
+ if err := json.Unmarshal(bs, to); err != nil {
+ return fmt.Errorf("Unmarshal() = %w", err)
+ }
+ return nil
+}
diff --git a/pkg/policy/parse_test.go b/pkg/policy/parse_test.go
new file mode 100644
index 0000000000..0c439dce78
--- /dev/null
+++ b/pkg/policy/parse_test.go
@@ -0,0 +1,189 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "knative.dev/pkg/apis"
+)
+
+func TestParse(t *testing.T) {
+ tests := []struct {
+ name string
+ doc string
+ want []*unstructured.Unstructured
+ wantErr error
+ }{{
+ name: "good single object",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec: {}
+`,
+ want: []*unstructured.Unstructured{{
+ Object: map[string]interface{}{
+ "apiVersion": "policy.sigstore.dev/v1beta1",
+ "kind": "ClusterImagePolicy",
+ "metadata": map[string]interface{}{
+ "name": "blah",
+ },
+ "spec": map[string]interface{}{},
+ },
+ }},
+ }, {
+ name: "good multi-object",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec: {}
+---
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: foo
+spec: {}
+---
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: bar
+spec: {}
+`,
+ want: []*unstructured.Unstructured{{
+ Object: map[string]interface{}{
+ "apiVersion": "policy.sigstore.dev/v1beta1",
+ "kind": "ClusterImagePolicy",
+ "metadata": map[string]interface{}{
+ "name": "blah",
+ },
+ "spec": map[string]interface{}{},
+ },
+ }, {
+ Object: map[string]interface{}{
+ "apiVersion": "policy.sigstore.dev/v1beta1",
+ "kind": "ClusterImagePolicy",
+ "metadata": map[string]interface{}{
+ "name": "foo",
+ },
+ "spec": map[string]interface{}{},
+ },
+ }, {
+ Object: map[string]interface{}{
+ "apiVersion": "policy.sigstore.dev/v1beta1",
+ "kind": "ClusterImagePolicy",
+ "metadata": map[string]interface{}{
+ "name": "bar",
+ },
+ "spec": map[string]interface{}{},
+ },
+ }},
+ }, {
+ name: "bad missing apiVersion",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec: {}
+---
+# Missing: apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: foo
+spec: {}
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: bar
+spec: {}
+`,
+ wantErr: apis.ErrMissingField("[1].apiVersion"),
+ }, {
+ name: "bad missing kind",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec: {}
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: foo
+spec: {}
+---
+apiVersion: policy.sigstore.dev/v1beta1
+# Missing: kind: ClusterImagePolicy
+metadata:
+ name: bar
+spec: {}
+`,
+ wantErr: errors.New(`decoding object[2]: error unmarshaling JSON: while decoding JSON: Object 'Kind' is missing in '{"apiVersion":"policy.sigstore.dev/v1beta1","metadata":{"name":"bar"},"spec":{}}'`),
+ }, {
+		name: "bad missing metadata.name",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ # Missing: name: blah
+spec: {}
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: foo
+spec: {}
+---
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: bar
+spec: {}
+`,
+ wantErr: apis.ErrMissingField("[0].metadata.name"),
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got, gotErr := Parse(context.Background(), test.doc)
+
+ switch {
+ case (gotErr != nil) != (test.wantErr != nil):
+ t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr)
+ case gotErr != nil && gotErr.Error() != test.wantErr.Error():
+ t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr)
+ case gotErr != nil:
+ return // This was an error test.
+ }
+
+ if diff := cmp.Diff(got, test.want); diff != "" {
+ t.Errorf("Parse (-got, +want) = %s", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/policy/policy.go b/pkg/policy/policy.go
new file mode 100644
index 0000000000..7622973318
--- /dev/null
+++ b/pkg/policy/policy.go
@@ -0,0 +1,91 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "knative.dev/pkg/apis"
+)
+
+type Verification struct {
+ // NoMatchPolicy specifies the behavior when a base image doesn't match any
+ // of the listed policies. It allows the values: allow, deny, and warn.
+ NoMatchPolicy string `yaml:"no-match-policy,omitempty"`
+
+ // Policies specifies a collection of policies to use to cover the base
+ // images used as part of evaluation. See "policy" below for usage.
+ Policies *[]PolicyData `yaml:"policies,omitempty"`
+}
+
+// PolicyData contains a set of options for specifying a PolicyData. Exactly
+// one of the fields may be specified for each PolicyData entry.
+type PolicyData struct {
+ // Data is a collection of one or more ClusterImagePolicy resources.
+ Data string `yaml:"data,omitempty"`
+
+ // TODO(mattmoor): Path support
+ // // Path is a path to a file or directory containing ClusterImagePolicy resources.
+ // // TODO(mattmoor): How do we want to handle something like -R? Perhaps we
+ // // don't and encourage folks to list each directory individually?
+ // Path string `yaml:"path,omitempty"`
+
+ // TODO(mattmoor): URL support
+ // // URL links to a file containing one or more ClusterImagePolicy resources.
+ // URL string `yaml:"url,omitempty"`
+}
+
+func (v *Verification) Validate(ctx context.Context) (errs *apis.FieldError) {
+ switch v.NoMatchPolicy {
+ case "allow", "deny", "warn":
+ // Good!
+ case "":
+ errs = errs.Also(apis.ErrMissingField("noMatchPolicy"))
+ default:
+ errs = errs.Also(apis.ErrInvalidValue(v.NoMatchPolicy, "noMatchPolicy"))
+ }
+
+ if v.Policies == nil {
+ errs = errs.Also(apis.ErrMissingField("policies"))
+ } else {
+ for i, p := range *v.Policies {
+ errs = errs.Also(p.Validate(ctx).ViaFieldIndex("policies", i))
+ }
+ }
+
+ return errs
+}
+
+func (pd *PolicyData) Validate(ctx context.Context) (errs *apis.FieldError) {
+ // Check that exactly one of the fields is set.
+ set := sets.NewString()
+ if pd.Data != "" {
+ set.Insert("data")
+ // TODO(mattmoor): Validate data.
+ }
+ // TODO(mattmoor): Check for the other fields as we add them here.
+
+ switch set.Len() {
+ case 0:
+ // TODO: Change this to ErrMissingOneOf when we add more fields.
+ errs = errs.Also(apis.ErrMissingField("data"))
+ case 1:
+ // What we want.
+ default:
+ errs = errs.Also(apis.ErrMultipleOneOf(set.List()...))
+ }
+ return errs
+}
diff --git a/pkg/policy/validate.go b/pkg/policy/validate.go
new file mode 100644
index 0000000000..62c3ae5971
--- /dev/null
+++ b/pkg/policy/validate.go
@@ -0,0 +1,112 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/policy-controller/pkg/apis/policy/v1alpha1"
+ "github.com/sigstore/policy-controller/pkg/apis/policy/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "knative.dev/pkg/apis"
+)
+
+var (
+ // ErrEmptyDocument is the error returned when no document body is
+ // specified.
+ ErrEmptyDocument = errors.New("document is required to create policy")
+
+ // ErrUnknownType is the error returned when a type contained in the policy
+ // is unrecognized.
+ ErrUnknownType = errors.New("unknown type")
+)
+
+// Validate decodes a provided YAML document containing zero or more objects
+// and performs limited validation on them.
+func Validate(ctx context.Context, document string) (warns error, err error) {
+ if len(document) == 0 {
+ return nil, ErrEmptyDocument
+ }
+
+ uol, err := Parse(ctx, document)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, uo := range uol {
+ switch uo.GroupVersionKind() {
+ case v1beta1.SchemeGroupVersion.WithKind("ClusterImagePolicy"):
+ if warns, err = validate(ctx, uo, &v1beta1.ClusterImagePolicy{}); err != nil {
+ return
+ }
+
+ case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy"):
+ if warns, err = validate(ctx, uo, &v1alpha1.ClusterImagePolicy{}); err != nil {
+ return
+ }
+
+ case corev1.SchemeGroupVersion.WithKind("Secret"):
+ if uo.GetNamespace() != "cosign-system" {
+ return warns, apis.ErrInvalidValue(uo.GetNamespace(), "metadata.namespace").ViaIndex(i)
+ }
+ // Any additional validation worth performing? Should we check the
+ // schema of the secret matches the expectations of cosigned?
+
+ default:
+ return warns, fmt.Errorf("%w: %v", ErrUnknownType, uo.GroupVersionKind())
+ }
+ }
+ return warns, nil
+}
+
+type crd interface {
+ apis.Validatable
+ apis.Defaultable
+}
+
+func validate(ctx context.Context, uo *unstructured.Unstructured, v crd) (warns error, err error) {
+ b, err := json.Marshal(uo)
+ if err != nil {
+ return nil, fmt.Errorf("unable to marshal: %w", err)
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.DisallowUnknownFields()
+ if err := dec.Decode(v); err != nil {
+ return nil, fmt.Errorf("unable to unmarshal: %w", err)
+ }
+
+ // Apply defaulting to simulate the defaulting webhook that runs prior
+ // to validation.
+ v.SetDefaults(ctx)
+
+ // We can't just return v.Validate(ctx) because of Go's typed nils.
+ // nolint:revive
+ if ve := v.Validate(ctx); ve != nil {
+ // Separate validation warnings from errors so the caller can discern between them.
+ if warnFE := ve.Filter(apis.WarningLevel); warnFE != nil {
+ warns = warnFE
+ }
+ if errorFE := ve.Filter(apis.ErrorLevel); errorFE != nil {
+ err = errorFE
+ }
+ }
+ return
+}
diff --git a/pkg/policy/validate_test.go b/pkg/policy/validate_test.go
new file mode 100644
index 0000000000..3172aa9d8d
--- /dev/null
+++ b/pkg/policy/validate_test.go
@@ -0,0 +1,203 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ policycontrollerconfig "github.com/sigstore/policy-controller/pkg/config"
+ "knative.dev/pkg/apis"
+)
+
+func TestValidate(t *testing.T) {
+ tests := []struct {
+ name string
+ doc string
+ wantWarns error
+ wantErr error
+ allowEmptyAuthorities bool
+ }{{
+ name: "good single object",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+ authorities:
+ - keyless:
+ identities:
+ - issuer: https://issuer.example.com
+ subject: foo@example.com
+ url: https://fulcio.sigstore.dev
+`,
+ wantErr: nil,
+ }, {
+ name: "good CIP and Secret",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+ authorities:
+ - keyless:
+ identities:
+ - issuer: https://issuer.example.com
+ subject: foo@example.com
+ url: https://fulcio.sigstore.dev
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: foo
+ namespace: cosign-system
+stringData:
+ foo: bar
+`,
+ wantErr: nil,
+ }, {
+ name: "bad secret namespace",
+ doc: `
+apiVersion: v1
+kind: Secret
+metadata:
+ name: foo
+ namespace: something-system
+stringData:
+ foo: bar
+`,
+ wantErr: errors.New(`invalid value: something-system: [0].metadata.namespace`),
+ }, {
+ name: "bad image policy",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+ authorities:
+ - key: {}
+`,
+ wantErr: apis.ErrMissingOneOf("data", "kms", "secretref").ViaField("key").ViaFieldIndex("authorities", 0).ViaField("spec"),
+ }, {
+ name: "empty document",
+ doc: ``,
+ wantErr: ErrEmptyDocument,
+ }, {
+ name: "object missing kind",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+# Missing: kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec: {}
+`,
+ wantErr: errors.New(`decoding object[0]: error unmarshaling JSON: while decoding JSON: Object 'Kind' is missing in '{"apiVersion":"policy.sigstore.dev/v1beta1","metadata":{"name":"blah"},"spec":{}}'`),
+ }, {
+ name: "unknown field",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ asdf: dfsadf
+`,
+ wantErr: errors.New(`unable to unmarshal: json: unknown field "asdf"`),
+ }, {
+ name: "unknown type",
+ doc: `
+apiVersion: unknown.dev/v1
+kind: OtherPolicy
+metadata:
+ name: blah
+spec: {}
+`,
+ wantErr: errors.New(`unknown type: unknown.dev/v1, Kind=OtherPolicy`),
+ }, {
+ name: "warning - missing field",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+ authorities:
+ - keyless:
+ url: https://fulcio.sigstore.dev
+`,
+ wantWarns: errors.New("missing field(s): spec.authorities[0].keyless.identities"),
+ wantErr: nil,
+ },
+ {
+ name: "admit - missing authorities",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+`,
+ wantErr: nil,
+ allowEmptyAuthorities: true,
+ }, {
+ name: "deny - missing authorities",
+ doc: `
+apiVersion: policy.sigstore.dev/v1beta1
+kind: ClusterImagePolicy
+metadata:
+ name: blah
+spec:
+ images:
+ - glob: '*'
+`,
+ wantErr: errors.New("missing field(s): spec.authorities"),
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ testContext := context.Background()
+ if test.allowEmptyAuthorities {
+ testContext = policycontrollerconfig.ToContext(testContext, &policycontrollerconfig.PolicyControllerConfig{FailOnEmptyAuthorities: false})
+ }
+ gotWarns, gotErr := Validate(testContext, test.doc)
+ if (gotErr != nil) != (test.wantErr != nil) {
+ t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr)
+ }
+ if (gotWarns != nil) != (test.wantWarns != nil) {
+ t.Fatalf("Parse() = %v, wanted %v", gotWarns, test.wantWarns)
+ }
+ if gotErr != nil && gotErr.Error() != test.wantErr.Error() {
+ t.Fatalf("Parse() = %v, wanted %v", gotErr, test.wantErr)
+ }
+ if gotWarns != nil && gotWarns.Error() != test.wantWarns.Error() {
+ t.Fatalf("Parse() = %v, wanted %v", gotWarns, test.wantWarns)
+ }
+ })
+ }
+}
diff --git a/pkg/policy/verifier.go b/pkg/policy/verifier.go
new file mode 100644
index 0000000000..4d7a0941f0
--- /dev/null
+++ b/pkg/policy/verifier.go
@@ -0,0 +1,147 @@
+// Copyright 2023 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+ ociremote "github.com/sigstore/cosign/pkg/oci/remote"
+ "github.com/sigstore/policy-controller/pkg/apis/config"
+ "github.com/sigstore/policy-controller/pkg/webhook"
+ webhookcip "github.com/sigstore/policy-controller/pkg/webhook/clusterimagepolicy"
+ "knative.dev/pkg/apis"
+)
+
+// Verifier is the interface for checking that a given image digest satisfies
+// the policies backing this interface.
+type Verifier interface {
+ // Verify checks that the provided digest satisfies the backing policies.
+ Verify(context.Context, name.Digest, authn.Keychain) error
+}
+
+// WarningWriter is used to surface warning messages in a manner that
+// is customizable by callers that's suitable for their execution
+// environment.
+type WarningWriter func(string, ...interface{})
+
+// Compile turns a Verification into an executable Verifier.
+// Any compilation errors are returned here.
+func Compile(ctx context.Context, v Verification, ww WarningWriter) (Verifier, error) {
+ // TODO(mattmoor): Validate NoMatchPolicy.
+ // TODO(mattmoor): Validate Policies.
+
+	ipc, err := gather(ctx, v, ww)
+ if err != nil {
+ return nil, err
+ }
+
+ return &impl{
+ verification: v,
+ ipc: ipc,
+ ww: ww,
+ }, nil
+}
+
+func gather(ctx context.Context, v Verification, ww WarningWriter) (*config.ImagePolicyConfig, error) {
+ pol := *v.Policies
+ ipc := &config.ImagePolicyConfig{
+ Policies: make(map[string]webhookcip.ClusterImagePolicy, len(pol)),
+ }
+
+ for i, p := range pol {
+ switch {
+ case p.Data != "":
+ l, warns, err := ParseClusterImagePolicies(ctx, p.Data)
+ if err != nil {
+ return nil, fmt.Errorf("parsing policies: %w", err)
+ }
+ if warns != nil {
+ ww("policy %d: %v", i, warns)
+ }
+
+ // TODO(mattmoor): Add additional checks for unsupported things,
+ // like Match, IncludeSpec, etc.
+
+ for _, cip := range l {
+ cip.SetDefaults(ctx)
+ if _, ok := ipc.Policies[cip.Name]; ok {
+ ww("duplicate policy named %q, skipping.", cip.Name)
+ continue
+ }
+ ipc.Policies[cip.Name] = *webhookcip.ConvertClusterImagePolicyV1alpha1ToWebhook(cip)
+ }
+ default:
+ return nil, fmt.Errorf("unsupported policy shape: %v", p)
+ }
+ }
+
+ return ipc, nil
+}
+
+type impl struct {
+ verification Verification
+
+ ipc *config.ImagePolicyConfig
+ ww WarningWriter
+}
+
+// Check that impl implements Verifier
+var _ Verifier = (*impl)(nil)
+
+// Verify implements Verifier
+func (i *impl) Verify(ctx context.Context, d name.Digest, kc authn.Keychain) error {
+ matches, err := i.ipc.GetMatchingPolicies(d.Name(), "" /* kind */, "" /* apiVersion */, nil /* labels */)
+ if err != nil {
+ return err
+ }
+
+ if len(matches) == 0 {
+ switch i.verification.NoMatchPolicy {
+ case "allow":
+ return nil
+ case "warn":
+ i.ww("%s is uncovered by policy", d)
+ case "deny":
+ return fmt.Errorf("%s is uncovered by policy", d)
+ default:
+ return fmt.Errorf("unsupported noMatchPolicy: %q", i.verification.NoMatchPolicy)
+ }
+ }
+
+ for _, p := range matches {
+ _, errs := webhook.ValidatePolicy(ctx, "" /* namespace */, d, p,
+ kc, ociremote.WithRemoteOptions(remote.WithAuthFromKeychain(kc)))
+ for _, err := range errs {
+ var fe *apis.FieldError
+ if errors.As(err, &fe) {
+ if warnFE := fe.Filter(apis.WarningLevel); warnFE != nil {
+ i.ww("%v", warnFE)
+ }
+ if errorFE := fe.Filter(apis.ErrorLevel); errorFE != nil {
+ return errorFE
+ }
+ } else {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index 5ac4a843e1..efedadbea2 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.12.1"
+const Version = "1.13.0"
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 8631b6d6d2..6e3ee8d6ab 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
+
## [0.1.0] (2022-10-26)
Initial release of metadata being it's own module.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 50538b1d34..d4aad9bf39 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client {
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
+ IdleConnTimeout: 60 * time.Second,
},
Timeout: 5 * time.Second,
}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore b/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore
new file mode 100644
index 0000000000..c435b7ebb6
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.gitignore
@@ -0,0 +1,17 @@
+# IntelliJ IDEA
+.idea
+*.iml
+.editorconfig
+
+# VS Code
+.vscode
+
+# OS X
+.DS_Store
+
+# Emacs
+*~
+\#*\#
+
+# Vim
+.swp
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
new file mode 100644
index 0000000000..f53103b1a3
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+go:
+ - 1.11.x
+
+go_import_path: contrib.go.opencensus.io/exporter/ocagent
+
+install: skip
+
+before_script:
+ - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
+ - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
+ - GO111MODULE=on # Depend on go.mod for dependencies
+
+script:
+ - go build ./... # Ensure dependency updates don't break build
+ - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
+ - go vet ./...
+ - go test -v -race $PKGS # Run all the tests with the race detector enabled
+ - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
new file mode 100644
index 0000000000..0786fdf434
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
@@ -0,0 +1,24 @@
+# How to contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
new file mode 100644
index 0000000000..3b9e908f59
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
@@ -0,0 +1,61 @@
+# OpenCensus Agent Go Exporter
+
+[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
+
+
+This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
+OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from
+OpenCensus Library, export them to other backends and possibly push configurations back to
+Library. See more details on [OC-Agent Readme][OCAgentReadme].
+
+Note: This is an experimental repository and is likely to get backwards-incompatible changes.
+Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
+
+## Installation
+
+```bash
+$ go get -u contrib.go.opencensus.io/exporter/ocagent
+```
+
+## Usage
+
+```go
+import (
+ "context"
+ "fmt"
+ "log"
+ "time"
+
+ "contrib.go.opencensus.io/exporter/ocagent"
+ "go.opencensus.io/trace"
+)
+
+func Example() {
+ exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
+ if err != nil {
+ log.Fatalf("Failed to create the agent exporter: %v", err)
+ }
+ defer exp.Stop()
+
+ // Now register it as a trace exporter.
+ trace.RegisterExporter(exp)
+
+ // Then use the OpenCensus tracing library, like we normally would.
+ ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
+ defer span.End()
+
+ for i := 0; i < 10; i++ {
+ _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
+ <-time.After(6 * time.Millisecond)
+ iSpan.End()
+ }
+}
+```
+
+[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
+[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
+[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
+[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
+[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
+[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
+
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
new file mode 100644
index 0000000000..297e44b6e7
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
@@ -0,0 +1,38 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "math/rand"
+ "time"
+)
+
+var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// retries function fn upto n times, if fn returns an error lest it returns nil early.
+// It applies exponential backoff in units of (1< 0 {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
+ }
+ traceExporter, err := traceSvcClient.Export(ctx)
+ if err != nil {
+ return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
+ }
+
+ firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
+ Node: node,
+ Resource: ae.resource,
+ }
+ if err := traceExporter.Send(firstTraceMessage); err != nil {
+ return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+ }
+
+ ae.mu.Lock()
+ ae.traceExporter = traceExporter
+ ae.mu.Unlock()
+
+ // Initiate the config service by sending over node identifier info.
+ configStream, err := traceSvcClient.Config(context.Background())
+ if err != nil {
+ return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
+ }
+ firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
+ if err := configStream.Send(firstCfgMessage); err != nil {
+ return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+ }
+
+ // In the background, handle trace configurations that are beamed down
+ // by the agent, but also reply to it with the applied configuration.
+ go ae.handleConfigStreaming(configStream)
+
+ return nil
+}
+
+func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
+ metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
+ metricsExporter, err := metricsSvcClient.Export(context.Background())
+ if err != nil {
+ return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
+ }
+ // Initiate the metrics service by sending over the first message just containing the Node and Resource.
+ firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
+ Node: node,
+ Resource: ae.resource,
+ }
+ if err := metricsExporter.Send(firstMetricsMessage); err != nil {
+ return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
+ }
+
+ ae.mu.Lock()
+ ae.metricsExporter = metricsExporter
+ ae.mu.Unlock()
+
+ // With that we are good to go and can start sending metrics
+ return nil
+}
+
+func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
+ addr := ae.prepareAgentAddress()
+ var dialOpts []grpc.DialOption
+ if ae.clientTransportCredentials != nil {
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
+ } else if ae.canDialInsecure {
+ dialOpts = append(dialOpts, grpc.WithInsecure())
+ }
+ if ae.compressor != "" {
+ dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
+ }
+ dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+ if len(ae.grpcDialOptions) != 0 {
+ dialOpts = append(dialOpts, ae.grpcDialOptions...)
+ }
+
+ ctx := context.Background()
+ if len(ae.headers) > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
+ }
+ return grpc.DialContext(ctx, addr, dialOpts...)
+}
+
+func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
+ // Note: We haven't yet implemented configuration sending so we
+ // should NOT be changing connection states within this function for now.
+ for {
+ recv, err := configStream.Recv()
+ if err != nil {
+ // TODO: Check if this is a transient error or exponential backoff-able.
+ return err
+ }
+ cfg := recv.Config
+ if cfg == nil {
+ continue
+ }
+
+ // Otherwise now apply the trace configuration sent down from the agent
+ if psamp := cfg.GetProbabilitySampler(); psamp != nil {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
+ } else if csamp := cfg.GetConstantSampler(); csamp != nil {
+ alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
+ if alwaysSample {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+ } else {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
+ }
+ } else { // TODO: Add the rate limiting sampler here
+ }
+
+ // Then finally send back to upstream the newly applied configuration
+ err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
+ if err != nil {
+ return err
+ }
+ }
+}
+
+// Stop shuts down all the connections and resources
+// related to the exporter.
+func (ae *Exporter) Stop() error {
+ ae.mu.RLock()
+ cc := ae.grpcClientConn
+ started := ae.started
+ stopped := ae.stopped
+ ae.mu.RUnlock()
+
+ if !started {
+ return errNotStarted
+ }
+ if stopped {
+ // TODO: tell the user that we've already stopped, so perhaps a sentinel error?
+ return nil
+ }
+
+ ae.Flush()
+
+ // Now close the underlying gRPC connection.
+ var err error
+ if cc != nil {
+ err = cc.Close()
+ }
+
+ // At this point we can change the state variables: started and stopped
+ ae.mu.Lock()
+ ae.started = false
+ ae.stopped = true
+ ae.mu.Unlock()
+ close(ae.stopCh)
+
+ // Ensure that the backgroundConnector returns
+ <-ae.backgroundConnectionDoneCh
+
+ return err
+}
+
+func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
+ if sd == nil {
+ return
+ }
+ _ = ae.traceBundler.Add(sd, 1)
+}
+
+func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
+ if batch == nil || len(batch.Spans) == 0 {
+ return nil
+ }
+
+ select {
+ case <-ae.stopCh:
+ return errStopped
+
+ default:
+ if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil {
+ return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr)
+ }
+
+ ae.senderMu.Lock()
+ err := ae.traceExporter.Send(batch)
+ ae.senderMu.Unlock()
+ if err != nil {
+ if err == io.EOF {
+ ae.recvMu.Lock()
+ // Perform a .Recv to try to find out why the RPC actually ended.
+ // See:
+ // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100
+ // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ
+ for {
+ _, err = ae.traceExporter.Recv()
+ if err != nil {
+ break
+ }
+ }
+ ae.recvMu.Unlock()
+ }
+
+ ae.setStateDisconnected(err)
+ if err != io.EOF {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func (ae *Exporter) ExportView(vd *view.Data) {
+ if vd == nil {
+ return
+ }
+ _ = ae.viewDataBundler.Add(vd, 1)
+}
+
+// ExportMetricsServiceRequest sends proto metrics with the metrics service client.
+func (ae *Exporter) ExportMetricsServiceRequest(batch *agentmetricspb.ExportMetricsServiceRequest) error {
+ if batch == nil || len(batch.Metrics) == 0 {
+ return nil
+ }
+
+ select {
+ case <-ae.stopCh:
+ return errStopped
+
+ default:
+ if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil {
+ return fmt.Errorf("ExportMetricsServiceRequest: no active connection, last connection error: %v", lastConnectErr)
+ }
+
+ ae.senderMu.Lock()
+ err := ae.metricsExporter.Send(batch)
+ ae.senderMu.Unlock()
+ if err != nil {
+ if err == io.EOF {
+ ae.recvMu.Lock()
+ // Perform a .Recv to try to find out why the RPC actually ended.
+ // See:
+ // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100
+ // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ
+ for {
+ _, err = ae.metricsExporter.Recv()
+ if err != nil {
+ break
+ }
+ }
+ ae.recvMu.Unlock()
+ }
+
+ ae.setStateDisconnected(err)
+ if err != io.EOF {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func ocSpanDataToPbSpans(sdl []*trace.SpanData, spanConfig SpanConfig) []*tracepb.Span {
+ if len(sdl) == 0 {
+ return nil
+ }
+ protoSpans := make([]*tracepb.Span, 0, len(sdl))
+ for _, sd := range sdl {
+ if sd != nil {
+ protoSpans = append(protoSpans, ocSpanToProtoSpan(sd, spanConfig))
+ }
+ }
+ return protoSpans
+}
+
+func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
+ select {
+ case <-ae.stopCh:
+ return
+
+ default:
+ if !ae.connected() {
+ return
+ }
+
+ protoSpans := ocSpanDataToPbSpans(sdl, ae.spanConfig)
+ if len(protoSpans) == 0 {
+ return
+ }
+ ae.senderMu.Lock()
+ err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
+ Spans: protoSpans,
+ Resource: ae.resource,
+ })
+ ae.senderMu.Unlock()
+ if err != nil {
+ ae.setStateDisconnected(err)
+ }
+ }
+}
+
+func ocViewDataToPbMetrics(vdl []*view.Data, metricNamePrefix string) []*metricspb.Metric {
+ if len(vdl) == 0 {
+ return nil
+ }
+ metrics := make([]*metricspb.Metric, 0, len(vdl))
+ for _, vd := range vdl {
+ if vd != nil {
+ vmetric, err := viewDataToMetric(vd, metricNamePrefix)
+ // TODO: (@odeke-em) somehow report this error, if it is non-nil.
+ if err == nil && vmetric != nil {
+ metrics = append(metrics, vmetric)
+ }
+ }
+ }
+ return metrics
+}
+
+func (ae *Exporter) uploadViewData(vdl []*view.Data) {
+ protoMetrics := ocViewDataToPbMetrics(vdl, ae.metricNamePerfix)
+ if len(protoMetrics) == 0 {
+ return
+ }
+ req := &agentmetricspb.ExportMetricsServiceRequest{
+ Metrics: protoMetrics,
+ Resource: ae.resource,
+ // TODO:(@odeke-em)
+ // a) Figure out how to derive a Node from the environment
+ // or better letting users of the exporter configure it.
+ }
+ ae.ExportMetricsServiceRequest(req)
+}
+
+func (ae *Exporter) Flush() {
+ ae.traceBundler.Flush()
+ ae.viewDataBundler.Flush()
+}
+
+func resourceProtoFromEnv() *resourcepb.Resource {
+ rs, _ := resource.FromEnv(context.Background())
+ if rs == nil {
+ return nil
+ }
+ return resourceToResourcePb(rs)
+}
+
+func resourceToResourcePb(rs *resource.Resource) *resourcepb.Resource {
+ rprs := &resourcepb.Resource{
+ Type: rs.Type,
+ }
+ if rs.Labels != nil {
+ rprs.Labels = make(map[string]string)
+ for k, v := range rs.Labels {
+ rprs.Labels[k] = v
+ }
+ }
+ return rprs
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
new file mode 100644
index 0000000000..148a564575
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
@@ -0,0 +1,206 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "time"
+
+ "go.opencensus.io/resource"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+const (
+ DefaultAgentPort uint16 = 55678
+ DefaultAgentHost string = "localhost"
+)
+
+type ExporterOption interface {
+ withExporter(e *Exporter)
+}
+
+type resourceDetector resource.Detector
+
+var _ ExporterOption = (*resourceDetector)(nil)
+
+func (rd resourceDetector) withExporter(e *Exporter) {
+ e.resourceDetector = resource.Detector(rd)
+}
+
+// WithResourceDetector allows one to register a resource detector. Resource Detector is used
+// to detect resources associated with the application. Detected resource is exported
+// along with the metrics. If the detector fails then it panics.
+// If a resource detector is not provided then by default it detects from the environment.
+func WithResourceDetector(rd resource.Detector) ExporterOption {
+ return resourceDetector(rd)
+}
+
+type insecureGrpcConnection int
+
+var _ ExporterOption = (*insecureGrpcConnection)(nil)
+
+func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
+ e.canDialInsecure = true
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC connection
+// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
+// does. Note, by default, client security is required unless WithInsecure is used.
+func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
+
+type addressSetter string
+
+func (as addressSetter) withExporter(e *Exporter) {
+ e.agentAddress = string(as)
+}
+
+var _ ExporterOption = (*addressSetter)(nil)
+
+// WithAddress allows one to set the address that the exporter will
+// connect to the agent on. If unset, it will instead try to use
+// connect to DefaultAgentHost:DefaultAgentPort
+func WithAddress(addr string) ExporterOption {
+ return addressSetter(addr)
+}
+
+type serviceNameSetter string
+
+func (sns serviceNameSetter) withExporter(e *Exporter) {
+ e.serviceName = string(sns)
+}
+
+var _ ExporterOption = (*serviceNameSetter)(nil)
+
+// WithServiceName allows one to set/override the service name
+// that the exporter will report to the agent.
+func WithServiceName(serviceName string) ExporterOption {
+ return serviceNameSetter(serviceName)
+}
+
+type reconnectionPeriod time.Duration
+
+func (rp reconnectionPeriod) withExporter(e *Exporter) {
+ e.reconnectionPeriod = time.Duration(rp)
+}
+
+func WithReconnectionPeriod(rp time.Duration) ExporterOption {
+ return reconnectionPeriod(rp)
+}
+
+type compressorSetter string
+
+func (c compressorSetter) withExporter(e *Exporter) {
+ e.compressor = string(c)
+}
+
+// UseCompressor will set the compressor for the gRPC client to use when sending requests.
+// It is the responsibility of the caller to ensure that the compressor set has been registered
+// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
+// compressors auto-register on import, such as gzip, which can be registered by calling
+// `import _ "google.golang.org/grpc/encoding/gzip"`
+func UseCompressor(compressorName string) ExporterOption {
+ return compressorSetter(compressorName)
+}
+
+type headerSetter map[string]string
+
+func (h headerSetter) withExporter(e *Exporter) {
+ e.headers = map[string]string(h)
+}
+
+// WithHeaders will send the provided headers when the gRPC stream connection
+// is instantiated
+func WithHeaders(headers map[string]string) ExporterOption {
+ return headerSetter(headers)
+}
+
+type clientCredentials struct {
+ credentials.TransportCredentials
+}
+
+var _ ExporterOption = (*clientCredentials)(nil)
+
+// WithTLSCredentials allows the connection to use TLS credentials
+// when talking to the server. It takes in grpc.TransportCredentials instead
+// of say a Certificate file or a tls.Certificate, because the retrieving
+// these credentials can be done in many ways e.g. plain file, in code tls.Config
+// or by certificate rotation, so it is up to the caller to decide what to use.
+func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
+ return &clientCredentials{TransportCredentials: creds}
+}
+
+func (cc *clientCredentials) withExporter(e *Exporter) {
+ e.clientTransportCredentials = cc.TransportCredentials
+}
+
+type grpcDialOptions []grpc.DialOption
+
+var _ ExporterOption = (*grpcDialOptions)(nil)
+
+// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts
+// with some other configuration the GRPC specified via the agent the ones here will
+// take preference since they are set last.
+func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption {
+ return grpcDialOptions(opts)
+}
+
+func (opts grpcDialOptions) withExporter(e *Exporter) {
+ e.grpcDialOptions = opts
+}
+
+type metricNamePrefixSetter string
+
+var _ ExporterOption = (*metricNamePrefixSetter)(nil)
+
+func (p metricNamePrefixSetter) withExporter(e *Exporter) {
+ e.metricNamePerfix = string(p)
+}
+
+// WithMetricNamePrefix provides an option for the caller to add a prefix to metric names.
+func WithMetricNamePrefix(prefix string) ExporterOption {
+ return metricNamePrefixSetter(prefix)
+}
+
+type dataBundlerOptions struct {
+ delay time.Duration
+ count int
+}
+
+var _ ExporterOption = (*dataBundlerOptions)(nil)
+
+func (b dataBundlerOptions) withExporter(e *Exporter) {
+ if b.delay > 0 {
+ e.viewDataDelay = b.delay
+ }
+ if b.count > 0 {
+ e.viewDataBundleCount = b.count
+ }
+}
+
+// WithDataBundlerOptions provides an option for the caller to configure the metrics data bundler.
+func WithDataBundlerOptions(delay time.Duration, count int) ExporterOption {
+ return dataBundlerOptions{delay, count}
+}
+
+func (spanConfig SpanConfig) withExporter(e *Exporter) {
+ e.spanConfig = spanConfig
+}
+
+var _ ExporterOption = (*SpanConfig)(nil)
+
+// WithSpanConfig allows one to set the AnnotationEventsPerSpan and MessageEventsPerSpan
+func WithSpanConfig(spanConfig SpanConfig) ExporterOption {
+ return spanConfig
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go
new file mode 100644
index 0000000000..8d3d60b1d8
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/span_config.go
@@ -0,0 +1,25 @@
+package ocagent
+
+const (
+ maxAnnotationEventsPerSpan = 32
+ maxMessageEventsPerSpan = 128
+)
+
+type SpanConfig struct {
+ AnnotationEventsPerSpan int
+ MessageEventsPerSpan int
+}
+
+func (spanConfig SpanConfig) GetAnnotationEventsPerSpan() int {
+ if spanConfig.AnnotationEventsPerSpan <= 0 {
+ return maxAnnotationEventsPerSpan
+ }
+ return spanConfig.AnnotationEventsPerSpan
+}
+
+func (spanConfig SpanConfig) GetMessageEventsPerSpan() int {
+ if spanConfig.MessageEventsPerSpan <= 0 {
+ return maxMessageEventsPerSpan
+ }
+ return spanConfig.MessageEventsPerSpan
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
new file mode 100644
index 0000000000..409afe1edb
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
@@ -0,0 +1,243 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "math"
+ "time"
+
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/tracestate"
+
+ tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+ "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+func ocSpanToProtoSpan(sd *trace.SpanData, spanConfig SpanConfig) *tracepb.Span {
+ if sd == nil {
+ return nil
+ }
+ var namePtr *tracepb.TruncatableString
+ if sd.Name != "" {
+ namePtr = &tracepb.TruncatableString{Value: sd.Name}
+ }
+ return &tracepb.Span{
+ TraceId: sd.TraceID[:],
+ SpanId: sd.SpanID[:],
+ ParentSpanId: sd.ParentSpanID[:],
+ Status: ocStatusToProtoStatus(sd.Status),
+ StartTime: timeToTimestamp(sd.StartTime),
+ EndTime: timeToTimestamp(sd.EndTime),
+ Links: ocLinksToProtoLinks(sd.Links),
+ Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
+ Name: namePtr,
+ Attributes: ocAttributesToProtoAttributes(sd.Attributes),
+ TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents, spanConfig),
+ Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
+ }
+}
+
+var blankStatus trace.Status
+
+func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
+ if status == blankStatus {
+ return nil
+ }
+ return &tracepb.Status{
+ Code: status.Code,
+ Message: status.Message,
+ }
+}
+
+func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
+ if len(links) == 0 {
+ return nil
+ }
+
+ sl := make([]*tracepb.Span_Link, 0, len(links))
+ for _, ocLink := range links {
+ // This redefinition is necessary to prevent ocLink.*ID[:] copies
+ // being reused -- in short we need a new ocLink per iteration.
+ ocLink := ocLink
+
+ sl = append(sl, &tracepb.Span_Link{
+ TraceId: ocLink.TraceID[:],
+ SpanId: ocLink.SpanID[:],
+ Type: ocLinkTypeToProtoLinkType(ocLink.Type),
+ })
+ }
+
+ return &tracepb.Span_Links{
+ Link: sl,
+ }
+}
+
+func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
+ switch oct {
+ case trace.LinkTypeChild:
+ return tracepb.Span_Link_CHILD_LINKED_SPAN
+ case trace.LinkTypeParent:
+ return tracepb.Span_Link_PARENT_LINKED_SPAN
+ default:
+ return tracepb.Span_Link_TYPE_UNSPECIFIED
+ }
+}
+
+func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
+ if len(attrs) == 0 {
+ return nil
+ }
+ outMap := make(map[string]*tracepb.AttributeValue)
+ for k, v := range attrs {
+ switch v := v.(type) {
+ case bool:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
+
+ case int:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
+
+ case int64:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
+
+ case string:
+ outMap[k] = &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_StringValue{
+ StringValue: &tracepb.TruncatableString{Value: v},
+ },
+ }
+ }
+ }
+ return &tracepb.Span_Attributes{
+ AttributeMap: outMap,
+ }
+}
+
+// This code is mostly copied from
+// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
+func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent, spanConfig SpanConfig) *tracepb.Span_TimeEvents {
+ if len(as) == 0 && len(es) == 0 {
+ return nil
+ }
+
+ timeEvents := &tracepb.Span_TimeEvents{}
+ var annotations, droppedAnnotationsCount int
+ var messageEvents, droppedMessageEventsCount int
+
+ // Transform annotations
+ for i, a := range as {
+ if annotations >= spanConfig.GetAnnotationEventsPerSpan() {
+ droppedAnnotationsCount = len(as) - i
+ break
+ }
+ annotations++
+ timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+ &tracepb.Span_TimeEvent{
+ Time: timeToTimestamp(a.Time),
+ Value: transformAnnotationToTimeEvent(&a),
+ },
+ )
+ }
+
+ // Transform message events
+ for i, e := range es {
+ if messageEvents >= spanConfig.GetMessageEventsPerSpan() {
+ droppedMessageEventsCount = len(es) - i
+ break
+ }
+ messageEvents++
+ timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+ &tracepb.Span_TimeEvent{
+ Time: timeToTimestamp(e.Time),
+ Value: transformMessageEventToTimeEvent(&e),
+ },
+ )
+ }
+
+ // Process dropped counter
+ timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+ timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+
+ return timeEvents
+}
+
+func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
+ return &tracepb.Span_TimeEvent_Annotation_{
+ Annotation: &tracepb.Span_TimeEvent_Annotation{
+ Description: &tracepb.TruncatableString{Value: a.Message},
+ Attributes: ocAttributesToProtoAttributes(a.Attributes),
+ },
+ }
+}
+
+func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
+ return &tracepb.Span_TimeEvent_MessageEvent_{
+ MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+ Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+ Id: uint64(e.MessageID),
+ UncompressedSize: uint64(e.UncompressedByteSize),
+ CompressedSize: uint64(e.CompressedByteSize),
+ },
+ }
+}
+
+// clip32 clips an int to the range of an int32.
+func clip32(x int) int32 {
+ if x < math.MinInt32 {
+ return math.MinInt32
+ }
+ if x > math.MaxInt32 {
+ return math.MaxInt32
+ }
+ return int32(x)
+}
+
+func timeToTimestamp(t time.Time) *timestamp.Timestamp {
+ nanoTime := t.UnixNano()
+ return ×tamp.Timestamp{
+ Seconds: nanoTime / 1e9,
+ Nanos: int32(nanoTime % 1e9),
+ }
+}
+
+func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
+ switch kind {
+ case trace.SpanKindClient:
+ return tracepb.Span_CLIENT
+ case trace.SpanKindServer:
+ return tracepb.Span_SERVER
+ default:
+ return tracepb.Span_SPAN_KIND_UNSPECIFIED
+ }
+}
+
+func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
+ if ts == nil {
+ return nil
+ }
+ return &tracepb.Span_Tracestate{
+ Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
+ }
+}
+
+func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
+ protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
+ for _, entry := range entries {
+ protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ })
+ }
+ return protoEntries
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
new file mode 100644
index 0000000000..4516091252
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
@@ -0,0 +1,278 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "errors"
+ "time"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+
+ "github.com/golang/protobuf/ptypes/timestamp"
+
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+)
+
+var (
+ errNilMeasure = errors.New("expecting a non-nil stats.Measure")
+ errNilView = errors.New("expecting a non-nil view.View")
+ errNilViewData = errors.New("expecting a non-nil view.Data")
+)
+
+func viewDataToMetric(vd *view.Data, metricNamePrefix string) (*metricspb.Metric, error) {
+ if vd == nil {
+ return nil, errNilViewData
+ }
+
+ descriptor, err := viewToMetricDescriptor(vd.View, metricNamePrefix)
+ if err != nil {
+ return nil, err
+ }
+
+ timeseries, err := viewDataToTimeseries(vd)
+ if err != nil {
+ return nil, err
+ }
+
+ metric := &metricspb.Metric{
+ MetricDescriptor: descriptor,
+ Timeseries: timeseries,
+ }
+ return metric, nil
+}
+
+func viewToMetricDescriptor(v *view.View, metricNamePrefix string) (*metricspb.MetricDescriptor, error) {
+ if v == nil {
+ return nil, errNilView
+ }
+ if v.Measure == nil {
+ return nil, errNilMeasure
+ }
+
+ name := stringOrCall(v.Name, v.Measure.Name)
+ if len(metricNamePrefix) > 0 {
+ name = metricNamePrefix + "/" + name
+ }
+ desc := &metricspb.MetricDescriptor{
+ Name: name,
+ Description: stringOrCall(v.Description, v.Measure.Description),
+ Unit: v.Measure.Unit(),
+ Type: aggregationToMetricDescriptorType(v),
+ LabelKeys: tagKeysToLabelKeys(v.TagKeys),
+ }
+ return desc, nil
+}
+
+func stringOrCall(first string, call func() string) string {
+ if first != "" {
+ return first
+ }
+ return call()
+}
+
+type measureType uint
+
+const (
+ measureUnknown measureType = iota
+ measureInt64
+ measureFloat64
+)
+
+func measureTypeFromMeasure(m stats.Measure) measureType {
+ switch m.(type) {
+ default:
+ return measureUnknown
+ case *stats.Float64Measure:
+ return measureFloat64
+ case *stats.Int64Measure:
+ return measureInt64
+ }
+}
+
+func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
+ if v == nil || v.Aggregation == nil {
+ return metricspb.MetricDescriptor_UNSPECIFIED
+ }
+ if v.Measure == nil {
+ return metricspb.MetricDescriptor_UNSPECIFIED
+ }
+
+ switch v.Aggregation.Type {
+ case view.AggTypeCount:
+ // Cumulative on int64
+ return metricspb.MetricDescriptor_CUMULATIVE_INT64
+
+ case view.AggTypeDistribution:
+ // Cumulative types
+ return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
+
+ case view.AggTypeLastValue:
+ // Gauge types
+ switch measureTypeFromMeasure(v.Measure) {
+ case measureFloat64:
+ return metricspb.MetricDescriptor_GAUGE_DOUBLE
+ case measureInt64:
+ return metricspb.MetricDescriptor_GAUGE_INT64
+ }
+
+ case view.AggTypeSum:
+ // Cumulative types
+ switch measureTypeFromMeasure(v.Measure) {
+ case measureFloat64:
+ return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
+ case measureInt64:
+ return metricspb.MetricDescriptor_CUMULATIVE_INT64
+ }
+ }
+
+ // For all other cases, return unspecified.
+ return metricspb.MetricDescriptor_UNSPECIFIED
+}
+
+func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
+ labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
+ for _, tagKey := range tagKeys {
+ labelKeys = append(labelKeys, &metricspb.LabelKey{
+ Key: tagKey.Name(),
+ })
+ }
+ return labelKeys
+}
+
+func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
+ if vd == nil || len(vd.Rows) == 0 {
+ return nil, nil
+ }
+
+ // Given that view.Data only contains Start, End
+ // the timestamps for all the row data will be the exact same
+ // per aggregation. However, the values will differ.
+ // Each row has its own tags.
+ startTimestamp := timeToProtoTimestamp(vd.Start)
+ endTimestamp := timeToProtoTimestamp(vd.End)
+
+ mType := measureTypeFromMeasure(vd.View.Measure)
+ timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
+ // It is imperative that the ordering of "LabelValues" matches those
+ // of the Label keys in the metric descriptor.
+ for _, row := range vd.Rows {
+ labelValues := labelValuesFromTags(row.Tags)
+ point := rowToPoint(vd.View, row, endTimestamp, mType)
+ timeseries = append(timeseries, &metricspb.TimeSeries{
+ StartTimestamp: startTimestamp,
+ LabelValues: labelValues,
+ Points: []*metricspb.Point{point},
+ })
+ }
+
+ if len(timeseries) == 0 {
+ return nil, nil
+ }
+
+ return timeseries, nil
+}
+
+func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
+ unixNano := t.UnixNano()
+ return ×tamp.Timestamp{
+ Seconds: int64(unixNano / 1e9),
+ Nanos: int32(unixNano % 1e9),
+ }
+}
+
+func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
+ pt := &metricspb.Point{
+ Timestamp: endTimestamp,
+ }
+
+ switch data := row.Data.(type) {
+ case *view.CountData:
+ pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
+
+ case *view.DistributionData:
+ pt.Value = &metricspb.Point_DistributionValue{
+ DistributionValue: &metricspb.DistributionValue{
+ Count: data.Count,
+ Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
+ // TODO: Add Exemplar
+ Buckets: bucketsToProtoBuckets(data.CountPerBucket),
+ BucketOptions: &metricspb.DistributionValue_BucketOptions{
+ Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+ Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+ Bounds: v.Aggregation.Buckets,
+ },
+ },
+ },
+ SumOfSquaredDeviation: data.SumOfSquaredDev,
+ }}
+
+ case *view.LastValueData:
+ setPointValue(pt, data.Value, mType)
+
+ case *view.SumData:
+ setPointValue(pt, data.Value, mType)
+ }
+
+ return pt
+}
+
+// Not returning anything from this function because metricspb.Point.is_Value is an unexported
+// interface hence we just have to set its value by pointer.
+func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
+ if mType == measureInt64 {
+ pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
+ } else {
+ pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
+ }
+}
+
+func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
+ distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
+ for i := 0; i < len(countPerBucket); i++ {
+ count := countPerBucket[i]
+
+ distBuckets[i] = &metricspb.DistributionValue_Bucket{
+ Count: count,
+ }
+ }
+
+ return distBuckets
+}
+
+func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
+ if len(tags) == 0 {
+ return nil
+ }
+
+ labelValues := make([]*metricspb.LabelValue, 0, len(tags))
+ for _, tag_ := range tags {
+ labelValues = append(labelValues, &metricspb.LabelValue{
+ Value: tag_.Value,
+
+ // It is imperative that we set the "HasValue" attribute,
+ // in order to distinguish missing a label from the empty string.
+ // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
+ //
+ // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
+ // so the best case that we can use to distinguish missing labels/tags from the
+ // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
+ // a value.
+ HasValue: tag_.Key.Name() != "",
+ })
+ }
+ return labelValues
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
new file mode 100644
index 0000000000..68be4c75bd
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
@@ -0,0 +1,17 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+const Version = "0.0.1"
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore
new file mode 100644
index 0000000000..85e7c1dfcb
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore
@@ -0,0 +1 @@
+/.idea/
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml
new file mode 100644
index 0000000000..0aa9844f42
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml
@@ -0,0 +1,123 @@
+# options for analysis running
+run:
+ # default concurrency is a available CPU number
+ concurrency: 4
+
+ # timeout for analysis, e.g. 30s, 5m, default is 1m
+ timeout: 10m
+
+ # exit code when at least one issue was found, default is 1
+ issues-exit-code: 1
+
+ # include test files or not, default is true
+ tests: true
+
+ # which dirs to skip: issues from them won't be reported;
+ # can use regexp here: generated.*, regexp is applied on full path;
+ # default value is empty list, but default dirs are skipped independently
+ # from this option's value (see skip-dirs-use-default).
+ skip-dirs:
+
+ # default is true. Enables skipping of directories:
+ # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+ skip-dirs-use-default: false
+
+ # which files to skip: they will be analyzed, but issues from them
+ # won't be reported. Default value is empty list, but there is
+ # no need to include all autogenerated files, we confidently recognize
+ # autogenerated files. If it's not please let us know.
+ skip-files:
+
+ # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
+ # If invoked with -mod=readonly, the go command is disallowed from the implicit
+ # automatic updating of go.mod described above. Instead, it fails when any changes
+ # to go.mod are needed. This setting is most useful to check that go.mod does
+ # not need updates, such as in a continuous integration and testing system.
+ # If invoked with -mod=vendor, the go command assumes that the vendor
+ # directory holds the correct copies of dependencies and ignores
+ # the dependency descriptions in go.mod.
+ modules-download-mode: readonly
+
+# output configuration options
+output:
+ # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
+ format: colored-line-number
+
+ # print lines of code with issue, default is true
+ print-issued-lines: true
+
+ # print linter name in the end of issue text, default is true
+ print-linter-name: true
+
+# all available settings of specific linters
+linters-settings:
+ govet:
+ # report about shadowed variables
+ check-shadowing: true
+
+ # settings per analyzer
+ settings:
+ printf: # analyzer name, run `go tool vet help` to see all analyzers
+ funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
+ - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
+ - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
+ - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
+ - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
+
+ enable-all: true
+ # TODO: Enable this and fix the alignment issues.
+ disable:
+ - fieldalignment
+
+ golint:
+ # minimal confidence for issues, default is 0.8
+ min-confidence: 0.8
+
+ gofmt:
+ # simplify code: gofmt with `-s` option, true by default
+ simplify: true
+
+ goimports:
+ # put imports beginning with prefix after 3rd-party packages;
+ # it's a comma-separated list of prefixes
+ local-prefixes: contrib.go.opencensus.io/exporter/prometheus
+
+ misspell:
+ # Correct spellings using locale preferences for US or UK.
+ # Default is to use a neutral variety of English.
+ # Setting locale to US will correct the British spelling of 'colour' to 'color'.
+ locale: US
+ ignore-words:
+ - cancelled
+ - metre
+ - meter
+ - metres
+ - kilometre
+ - kilometres
+
+linters:
+ disable:
+ - errcheck
+ enable:
+ - gofmt
+ - goimports
+ - golint
+ - gosec
+ - govet
+ - staticcheck
+ - misspell
+ - scopelint
+ - unconvert
+ - gocritic
+ - unparam
+
+issues:
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ # Exclude some linters from running on tests files.
+ - path: _test\.go
+ linters:
+ - scopelint
+ - text: "G404:"
+ linters:
+ - gosec
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml
new file mode 100644
index 0000000000..17afafec2b
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go_import_path: contrib.go.opencensus.io
+
+go:
+ - 1.15.x
+
+env:
+ global:
+ GO111MODULE=on
+
+before_script:
+ - make install-tools
+
+script:
+ - make travis-ci
+
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE b/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile
new file mode 100644
index 0000000000..cf4d613281
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile
@@ -0,0 +1,50 @@
+# TODO: Fix this on windows.
+ALL_SRC := $(shell find . -name '*.go' \
+ -not -path './vendor/*' \
+ -not -path '*/gen-go/*' \
+ -type f | sort)
+ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
+
+GOTEST_OPT?=-v -race -timeout 30s
+GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
+GOTEST=go test
+LINT=golangci-lint
+# TODO decide if we need to change these names.
+README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
+
+.DEFAULT_GOAL := lint-test
+
+.PHONY: lint-test
+lint-test: lint test
+
+# TODO enable test-with-coverage in travis
+.PHONY: travis-ci
+travis-ci: lint test test-386
+
+all-pkgs:
+ @echo $(ALL_PKGS) | tr ' ' '\n' | sort
+
+all-srcs:
+ @echo $(ALL_SRC) | tr ' ' '\n' | sort
+
+.PHONY: test
+test:
+ $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
+
+.PHONY: test-386
+test-386:
+ GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
+
+.PHONY: test-with-coverage
+test-with-coverage:
+ $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
+
+.PHONY: lint
+lint:
+ $(LINT) run --allow-parallel-runners
+
+.PHONY: install-tools
+install-tools:
+ cd internal/tools && go install golang.org/x/tools/cmd/cover
+ cd internal/tools && go install github.com/golangci/golangci-lint/cmd/golangci-lint
+
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md
new file mode 100644
index 0000000000..3a9c5d3c8e
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md
@@ -0,0 +1,14 @@
+# OpenCensus Go Prometheus Exporter
+
+[![Build Status](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus.svg?branch=master)](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus) [![GoDoc][godoc-image]][godoc-url]
+
+Provides OpenCensus metrics export support for Prometheus.
+
+## Installation
+
+```
+$ go get -u contrib.go.opencensus.io/exporter/prometheus
+```
+
+[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus?status.svg
+[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go
new file mode 100644
index 0000000000..b94c6d3991
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go
@@ -0,0 +1,303 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus contains a Prometheus exporter that supports exporting
+// OpenCensus views as Prometheus metrics.
+package prometheus // import "contrib.go.opencensus.io/exporter/prometheus"
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "go.opencensus.io/metric/metricdata"
+ "go.opencensus.io/metric/metricexport"
+ "go.opencensus.io/stats/view"
+)
+
+// Exporter exports stats to Prometheus, users need
+// to register the exporter as an http.Handler to be
+// able to export.
+type Exporter struct {
+ opts Options
+ g prometheus.Gatherer
+ c *collector
+ handler http.Handler
+}
+
+// Options contains options for configuring the exporter.
+type Options struct {
+ Namespace string
+ Registry *prometheus.Registry
+ Registerer prometheus.Registerer
+ Gatherer prometheus.Gatherer
+ OnError func(err error)
+ ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views.
+}
+
+// NewExporter returns an exporter that exports stats to Prometheus.
+func NewExporter(o Options) (*Exporter, error) {
+ if o.Registry == nil {
+ o.Registry = prometheus.NewRegistry()
+ }
+ if o.Registerer == nil {
+ o.Registerer = o.Registry
+ }
+ if o.Gatherer == nil {
+ o.Gatherer = o.Registry
+ }
+
+ collector := newCollector(o, o.Registerer)
+ e := &Exporter{
+ opts: o,
+ g: o.Gatherer,
+ c: collector,
+ handler: promhttp.HandlerFor(o.Gatherer, promhttp.HandlerOpts{}),
+ }
+ collector.ensureRegisteredOnce()
+
+ return e, nil
+}
+
+var _ http.Handler = (*Exporter)(nil)
+
+// ensureRegisteredOnce invokes reg.Register on the collector itself
+// exactly once to ensure that we don't get errors such as
+// cannot register the collector: descriptor Desc{fqName: *}
+// already exists with the same fully-qualified name and const label values
+// which is documented by Prometheus at
+// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101
+func (c *collector) ensureRegisteredOnce() {
+ c.registerOnce.Do(func() {
+ if err := c.reg.Register(c); err != nil {
+ c.opts.onError(fmt.Errorf("cannot register the collector: %v", err))
+ }
+ })
+
+}
+
+func (o *Options) onError(err error) {
+ if o.OnError != nil {
+ o.OnError(err)
+ } else {
+ log.Printf("Failed to export to Prometheus: %v", err)
+ }
+}
+
+// ExportView exports to the Prometheus if view data has one or more rows.
+// Each OpenCensus AggregationData will be converted to
+// corresponding Prometheus Metric: SumData will be converted
+// to Untyped Metric, CountData will be a Counter Metric,
+// DistributionData will be a Histogram Metric.
+//
+// Deprecated: in lieu of metricexport.Reader interface.
+func (e *Exporter) ExportView(vd *view.Data) {
+}
+
+// ServeHTTP serves the Prometheus endpoint.
+func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ e.handler.ServeHTTP(w, r)
+}
+
+// collector implements prometheus.Collector
+type collector struct {
+ opts Options
+
+ registerOnce sync.Once
+
+ // reg helps collector register views dynamically.
+ reg prometheus.Registerer
+
+ // reader reads metrics from all registered producers.
+ reader *metricexport.Reader
+}
+
+func (c *collector) Describe(ch chan<- *prometheus.Desc) {
+ de := &descExporter{c: c, descCh: ch}
+ c.reader.ReadAndExport(de)
+}
+
+// Collect fetches the statistics from OpenCensus
+// and delivers them as Prometheus Metrics.
+// Collect is invoked every time a prometheus.Gatherer is run
+// for example when the HTTP endpoint is invoked by Prometheus.
+func (c *collector) Collect(ch chan<- prometheus.Metric) {
+ me := &metricExporter{c: c, metricCh: ch}
+ c.reader.ReadAndExport(me)
+}
+
+func newCollector(opts Options, registrar prometheus.Registerer) *collector {
+ return &collector{
+ reg: registrar,
+ opts: opts,
+ reader: metricexport.NewReader()}
+}
+
+func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc {
+ var labels prometheus.Labels
+ switch {
+ case metric.Resource == nil:
+ labels = c.opts.ConstLabels
+ case c.opts.ConstLabels == nil:
+ labels = metric.Resource.Labels
+ default:
+ labels = prometheus.Labels{}
+ for k, v := range c.opts.ConstLabels {
+ labels[k] = v
+ }
+ // Resource labels overwrite const labels.
+ for k, v := range metric.Resource.Labels {
+ labels[k] = v
+ }
+ }
+
+ return prometheus.NewDesc(
+ metricName(c.opts.Namespace, metric),
+ metric.Descriptor.Description,
+ toPromLabels(metric.Descriptor.LabelKeys),
+ labels)
+}
+
+type metricExporter struct {
+ c *collector
+ metricCh chan<- prometheus.Metric
+}
+
+// ExportMetrics exports to the Prometheus.
+// Each OpenCensus Metric will be converted to
+// corresponding Prometheus Metric:
+// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric,
+// TypeCumulativeDistribution will be a Histogram Metric.
+// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric
+func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error {
+ for _, metric := range metrics {
+ desc := me.c.toDesc(metric)
+ for _, ts := range metric.TimeSeries {
+ tvs := toLabelValues(ts.LabelValues)
+ for _, point := range ts.Points {
+ metric, err := toPromMetric(desc, metric, point, tvs)
+ if err != nil {
+ me.c.opts.onError(err)
+ } else if metric != nil {
+ me.metricCh <- metric
+ }
+ }
+ }
+ }
+ return nil
+}
+
+type descExporter struct {
+ c *collector
+ descCh chan<- *prometheus.Desc
+}
+
+// ExportMetrics exports descriptor to the Prometheus.
+// It is invoked when request to scrape descriptors is received.
+func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error {
+ for _, metric := range metrics {
+ desc := me.c.toDesc(metric)
+ me.descCh <- desc
+ }
+ return nil
+}
+
+func toPromLabels(mls []metricdata.LabelKey) (labels []string) {
+ for _, ml := range mls {
+ labels = append(labels, sanitize(ml.Key))
+ }
+ return labels
+}
+
+func metricName(namespace string, m *metricdata.Metric) string {
+ var name string
+ if namespace != "" {
+ name = namespace + "_"
+ }
+ return name + sanitize(m.Descriptor.Name)
+}
+
+func toPromMetric(
+ desc *prometheus.Desc,
+ metric *metricdata.Metric,
+ point metricdata.Point,
+ labelValues []string) (prometheus.Metric, error) {
+ switch metric.Descriptor.Type {
+ case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64:
+ pv, err := toPromValue(point)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...)
+
+ case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64:
+ pv, err := toPromValue(point)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...)
+
+ case metricdata.TypeCumulativeDistribution:
+ switch v := point.Value.(type) {
+ case *metricdata.Distribution:
+ points := make(map[float64]uint64)
+ // Histograms are cumulative in Prometheus.
+ // Get cumulative bucket counts.
+ cumCount := uint64(0)
+ for i, b := range v.BucketOptions.Bounds {
+ cumCount += uint64(v.Buckets[i].Count)
+ points[b] = cumCount
+ }
+ return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...)
+ default:
+ return nil, typeMismatchError(point)
+ }
+ case metricdata.TypeSummary:
+ // TODO: [rghetia] add support for TypeSummary.
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type)
+ }
+}
+
+func toLabelValues(labelValues []metricdata.LabelValue) (values []string) {
+ for _, lv := range labelValues {
+ if lv.Present {
+ values = append(values, lv.Value)
+ } else {
+ values = append(values, "")
+ }
+ }
+ return values
+}
+
+func typeMismatchError(point metricdata.Point) error {
+ return fmt.Errorf("point type %T does not match metric type", point)
+
+}
+
+func toPromValue(point metricdata.Point) (float64, error) {
+ switch v := point.Value.(type) {
+ case float64:
+ return v, nil
+ case int64:
+ return float64(v), nil
+ default:
+ return 0.0, typeMismatchError(point)
+ }
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go
new file mode 100644
index 0000000000..9c9a9c4dd7
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go
@@ -0,0 +1,38 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "github.com/prometheus/statsd_exporter/pkg/mapper"
+)
+
+const labelKeySizeLimit = 100
+
+// sanitize returns a string that is trunacated to 100 characters if it's too
+// long, and replaces non-alphanumeric characters to underscores.
+func sanitize(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ if len(s) > labelKeySizeLimit {
+ s = s[:labelKeySizeLimit]
+ }
+
+ s = mapper.EscapeMetricName(s)
+ if s[0] == '_' {
+ s = "key" + s
+ }
+ return s
+}
diff --git a/vendor/cuelang.org/go/AUTHORS b/vendor/cuelang.org/go/AUTHORS
new file mode 100644
index 0000000000..884392fca0
--- /dev/null
+++ b/vendor/cuelang.org/go/AUTHORS
@@ -0,0 +1,6 @@
+# This is the list of CUE authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+Google LLC
diff --git a/vendor/cuelang.org/go/LICENSE b/vendor/cuelang.org/go/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/cuelang.org/go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go
new file mode 100644
index 0000000000..6018808abd
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/ast.go
@@ -0,0 +1,1057 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ast declares the types used to represent syntax trees for CUE
+// packages.
+package ast // import "cuelang.org/go/cue/ast"
+
+import (
+ "fmt"
+ "strings"
+
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+)
+
+// ----------------------------------------------------------------------------
+// Interfaces
+//
+// There are three main classes of nodes: expressions, clauses, and declaration
+// nodes. The node names usually match the corresponding CUE spec production
+// names to which they correspond. The node fields correspond to the individual
+// parts of the respective productions.
+//
+// All nodes contain position information marking the beginning of the
+// corresponding source text segment; it is accessible via the Pos accessor
+// method. Nodes may contain additional position info for language constructs
+// where comments may be found between parts of the construct (typically any
+// larger, parenthesized subpart). That position information is needed to
+// properly position comments when printing the construct.
+
+// A Node represents any node in the abstract syntax tree.
+type Node interface {
+ Pos() token.Pos // position of first character belonging to the node
+ End() token.Pos // position of first character immediately after the node
+
+ // pos reports the pointer to the position of first character belonging to
+ // the node or nil if there is no such position.
+ pos() *token.Pos
+
+ // Deprecated: use ast.Comments
+ Comments() []*CommentGroup
+
+ // Deprecated: use ast.AddComment
+ AddComment(*CommentGroup)
+ commentInfo() *comments
+}
+
+// Name describes the type of n.
+func Name(n Node) string {
+ s := fmt.Sprintf("%T", n)
+ return strings.ToLower(s[strings.Index(s, "ast.")+4:])
+}
+
+func getPos(n Node) token.Pos {
+ p := n.pos()
+ if p == nil {
+ return token.NoPos
+ }
+ return *p
+}
+
+// SetPos sets a node to the given position, if possible.
+func SetPos(n Node, p token.Pos) {
+ ptr := n.pos()
+ if ptr == nil {
+ return
+ }
+ *ptr = p
+}
+
+// SetRelPos sets the relative position of a node without modifying its
+// file position. Setting it to token.NoRelPos allows a node to adopt default
+// formatting.
+func SetRelPos(n Node, p token.RelPos) {
+ ptr := n.pos()
+ if ptr == nil {
+ return
+ }
+ pos := *ptr
+ *ptr = pos.WithRel(p)
+}
+
+// An Expr is implemented by all expression nodes.
+type Expr interface {
+ Node
+ declNode() // An expression can be used as a declaration.
+ exprNode()
+}
+
+type expr struct{ decl }
+
+func (expr) exprNode() {}
+
+// A Decl node is implemented by all declarations.
+type Decl interface {
+ Node
+ declNode()
+}
+
+type decl struct{}
+
+func (decl) declNode() {}
+
+// A Label is any production that can be used as a LHS label.
+type Label interface {
+ Node
+ labelNode()
+}
+
+type label struct{}
+
+func (l label) labelNode() {}
+
+// Clause nodes are part of comprehensions.
+type Clause interface {
+ Node
+ clauseNode()
+}
+
+type clause struct{}
+
+func (clause) clauseNode() {}
+
+func (x *ForClause) clauseNode() {}
+func (x *IfClause) clauseNode() {}
+func (x *Alias) clauseNode() {}
+
+// Comments
+
+type comments struct {
+ groups *[]*CommentGroup
+}
+
+func (c *comments) commentInfo() *comments { return c }
+
+func (c *comments) Comments() []*CommentGroup {
+ if c.groups == nil {
+ return []*CommentGroup{}
+ }
+ return *c.groups
+}
+
+// // AddComment adds the given comments to the fields.
+// // If line is true the comment is inserted at the preceding token.
+
+func (c *comments) AddComment(cg *CommentGroup) {
+ if cg == nil {
+ return
+ }
+ if c.groups == nil {
+ a := []*CommentGroup{cg}
+ c.groups = &a
+ return
+ }
+
+ *c.groups = append(*c.groups, cg)
+ a := *c.groups
+ for i := len(a) - 2; i >= 0 && a[i].Position > cg.Position; i-- {
+ a[i], a[i+1] = a[i+1], a[i]
+ }
+}
+
+func (c *comments) SetComments(cgs []*CommentGroup) {
+ if c.groups == nil {
+ a := cgs
+ c.groups = &a
+ return
+ }
+ *c.groups = cgs
+}
+
+// A Comment node represents a single //-style or /*-style comment.
+type Comment struct {
+ Slash token.Pos // position of "/" starting the comment
+ Text string // comment text (excluding '\n' for //-style comments)
+}
+
+func (c *Comment) Comments() []*CommentGroup { return nil }
+func (c *Comment) AddComment(*CommentGroup) {}
+func (c *Comment) commentInfo() *comments { return nil }
+
+func (c *Comment) Pos() token.Pos { return c.Slash }
+func (c *Comment) pos() *token.Pos { return &c.Slash }
+func (c *Comment) End() token.Pos { return c.Slash.Add(len(c.Text)) }
+
+// A CommentGroup represents a sequence of comments
+// with no other tokens and no empty lines between.
+type CommentGroup struct {
+ // TODO: remove and use the token position of the first comment.
+ Doc bool
+ Line bool // true if it is on the same line as the node's end pos.
+
+ // Position indicates where a comment should be attached if a node has
+ // multiple tokens. 0 means before the first token, 1 means before the
+ // second, etc. For instance, for a field, the positions are:
+ // <0> Label <1> ":" <2> Expr <3> "," <4>
+ Position int8
+ List []*Comment // len(List) > 0
+
+ decl
+}
+
+func (g *CommentGroup) Pos() token.Pos { return getPos(g) }
+func (g *CommentGroup) pos() *token.Pos { return g.List[0].pos() }
+func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
+
+func (g *CommentGroup) Comments() []*CommentGroup { return nil }
+func (g *CommentGroup) AddComment(*CommentGroup) {}
+func (g *CommentGroup) commentInfo() *comments { return nil }
+
+func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
+
+func stripTrailingWhitespace(s string) string {
+ i := len(s)
+ for i > 0 && isWhitespace(s[i-1]) {
+ i--
+ }
+ return s[0:i]
+}
+
+// Text returns the text of the comment.
+// Comment markers (//, /*, and */), the first space of a line comment, and
+// leading and trailing empty lines are removed. Multiple empty lines are
+// reduced to one, and trailing space on lines is trimmed. Unless the result
+// is empty, it is newline-terminated.
+func (g *CommentGroup) Text() string {
+ if g == nil {
+ return ""
+ }
+ comments := make([]string, len(g.List))
+ for i, c := range g.List {
+ comments[i] = c.Text
+ }
+
+ lines := make([]string, 0, 10) // most comments are less than 10 lines
+ for _, c := range comments {
+ // Remove comment markers.
+ // The parser has given us exactly the comment text.
+ switch c[1] {
+ case '/':
+ //-style comment (no newline at the end)
+ c = c[2:]
+ // strip first space - required for Example tests
+ if len(c) > 0 && c[0] == ' ' {
+ c = c[1:]
+ }
+ case '*':
+ /*-style comment */
+ c = c[2 : len(c)-2]
+ }
+
+ // Split on newlines.
+ cl := strings.Split(c, "\n")
+
+ // Walk lines, stripping trailing white space and adding to list.
+ for _, l := range cl {
+ lines = append(lines, stripTrailingWhitespace(l))
+ }
+ }
+
+ // Remove leading blank lines; convert runs of
+ // interior blank lines to a single blank line.
+ n := 0
+ for _, line := range lines {
+ if line != "" || n > 0 && lines[n-1] != "" {
+ lines[n] = line
+ n++
+ }
+ }
+ lines = lines[0:n]
+
+ // Add final "" entry to get trailing newline from Join.
+ if n > 0 && lines[n-1] != "" {
+ lines = append(lines, "")
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+// An Attribute provides meta data about a field.
+type Attribute struct {
+ At token.Pos
+ Text string // must be a valid attribute format.
+
+ comments
+ decl
+}
+
+func (a *Attribute) Pos() token.Pos { return a.At }
+func (a *Attribute) pos() *token.Pos { return &a.At }
+func (a *Attribute) End() token.Pos { return a.At.Add(len(a.Text)) }
+
+func (a *Attribute) Split() (key, body string) {
+ s := a.Text
+ p := strings.IndexByte(s, '(')
+ if p < 0 || !strings.HasPrefix(s, "@") || !strings.HasSuffix(s, ")") {
+ return "", ""
+ }
+ return a.Text[1:p], a.Text[p+1 : len(s)-1]
+}
+
+// A Field represents a field declaration in a struct.
+type Field struct {
+ Label Label // must have at least one element.
+ Optional token.Pos
+
+ // No TokenPos: Value must be a StructLit with one field.
+ TokenPos token.Pos
+ Token token.Token // ':' or '::', ILLEGAL implies ':'
+
+ Value Expr // the value associated with this field.
+
+ Attrs []*Attribute
+
+ comments
+ decl
+}
+
+func (d *Field) Pos() token.Pos { return d.Label.Pos() }
+func (d *Field) pos() *token.Pos { return d.Label.pos() }
+func (d *Field) End() token.Pos {
+ if len(d.Attrs) > 0 {
+ return d.Attrs[len(d.Attrs)-1].End()
+ }
+ return d.Value.End()
+}
+
+// TODO: make Alias a type of Field. This is possible now we have different
+// separator types.
+
+// An Alias binds another field to the alias name in the current struct.
+type Alias struct {
+ Ident *Ident // field name, always an Ident
+ Equal token.Pos // position of "="
+ Expr Expr // An Ident or SelectorExpr
+
+ comments
+ decl
+ expr
+ label
+}
+
+func (a *Alias) Pos() token.Pos { return a.Ident.Pos() }
+func (a *Alias) pos() *token.Pos { return a.Ident.pos() }
+func (a *Alias) End() token.Pos { return a.Expr.End() }
+
+// A Comprehension node represents a comprehension declaration.
+type Comprehension struct {
+ Clauses []Clause // There must be at least one clause.
+ Value Expr // Must be a struct TODO: change to Struct
+
+ comments
+ decl
+ expr // TODO: only allow Comprehension in "Embedding" productions.
+}
+
+func (x *Comprehension) Pos() token.Pos { return getPos(x) }
+func (x *Comprehension) pos() *token.Pos { return x.Clauses[0].pos() }
+func (x *Comprehension) End() token.Pos {
+ return x.Value.End()
+}
+
+// ----------------------------------------------------------------------------
+// Expressions and types
+//
+// An expression is represented by a tree consisting of one
+// or more of the following concrete expression nodes.
+
+// A BadExpr node is a placeholder for expressions containing
+// syntax errors for which no correct expression nodes can be
+// created. This is different from an ErrorExpr which represents
+// an explicitly marked error in the source.
+type BadExpr struct {
+ From, To token.Pos // position range of bad expression
+
+ comments
+ expr
+}
+
+// A BottomLit indicates an error.
+type BottomLit struct {
+ Bottom token.Pos
+
+ comments
+ expr
+}
+
+// An Ident node represents a left-hand side identifier.
+type Ident struct {
+ NamePos token.Pos // identifier position
+
+ // This LHS path element may be an identifier. Possible forms:
+ // foo: a normal identifier
+ // "foo": JSON compatible
+ Name string
+
+ Scope Node // scope in which node was found or nil if referring directly
+ Node Node
+
+ comments
+ label
+ expr
+}
+
+// A BasicLit node represents a literal of basic type.
+type BasicLit struct {
+ ValuePos token.Pos // literal position
+ Kind token.Token // INT, FLOAT, DURATION, or STRING
+ Value string // literal string; e.g. 42, 0x7f, 3.14, 1_234_567, 1e-9, 2.4i, 'a', '\x7f', "foo", or '\m\n\o'
+
+ comments
+ expr
+ label
+}
+
+// TODO: introduce and use NewLabel and NewBytes and perhaps NewText (in the
+// later case NewString would return a string or bytes type) to distinguish from
+// NewString. Consider how to pass indentation information.
+
+// NewString creates a new BasicLit with a string value without position.
+// It quotes the given string.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewString(str string) *BasicLit {
+ str = literal.String.Quote(str)
+ return &BasicLit{Kind: token.STRING, ValuePos: token.NoPos, Value: str}
+}
+
+// NewNull creates a new BasicLit configured to be a null value.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewNull() *BasicLit {
+ return &BasicLit{Kind: token.NULL, Value: "null"}
+}
+
+// NewLit creates a new BasicLit with from a token type and string without
+// position.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewLit(tok token.Token, s string) *BasicLit {
+ return &BasicLit{Kind: tok, Value: s}
+}
+
+// NewBool creates a new BasicLit with a bool value without position.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewBool(b bool) *BasicLit {
+ x := &BasicLit{}
+ if b {
+ x.Kind = token.TRUE
+ x.Value = "true"
+ } else {
+ x.Kind = token.FALSE
+ x.Value = "false"
+ }
+ return x
+}
+
+// TODO:
+// - use CUE-specific quoting (hoist functionality in export)
+// - NewBytes
+
+// An Interpolation node represents a string or bytes interpolation.
+type Interpolation struct {
+ Elts []Expr // interleaving of strings and expressions.
+
+ comments
+ expr
+ label
+}
+
+// A StructLit node represents a literal struct.
+type StructLit struct {
+ Lbrace token.Pos // position of "{"
+ Elts []Decl // list of elements; or nil
+ Rbrace token.Pos // position of "}"
+
+ comments
+ expr
+}
+
+// NewStruct creates a struct from the given fields.
+//
+// A field is either a *Field, an *Elipsis, *LetClause, a *CommentGroup, or a
+// Label, optionally followed by a token.OPTION to indicate the field is
+// optional, optionally followed by a token.ISA to indicate the field is a
+// definition followed by an expression for the field value.
+//
+// It will panic if values not matching these patterns are given. Useful for
+// ASTs generated by code other than the CUE parser.
+func NewStruct(fields ...interface{}) *StructLit {
+ s := &StructLit{
+ // Set default positions so that comment attachment is as expected.
+ Lbrace: token.NoSpace.Pos(),
+ }
+ for i := 0; i < len(fields); i++ {
+ var (
+ label Label
+ optional = token.NoPos
+ tok = token.ILLEGAL
+ expr Expr
+ )
+
+ switch x := fields[i].(type) {
+ case *Field:
+ s.Elts = append(s.Elts, x)
+ continue
+ case *CommentGroup:
+ s.Elts = append(s.Elts, x)
+ continue
+ case *Ellipsis:
+ s.Elts = append(s.Elts, x)
+ continue
+ case *LetClause:
+ s.Elts = append(s.Elts, x)
+ continue
+ case *embedding:
+ s.Elts = append(s.Elts, (*EmbedDecl)(x))
+ continue
+ case Label:
+ label = x
+ case string:
+ label = NewString(x)
+ default:
+ panic(fmt.Sprintf("unsupported label type %T", x))
+ }
+
+ inner:
+ for i++; i < len(fields); i++ {
+ switch x := (fields[i]).(type) {
+ case Expr:
+ expr = x
+ break inner
+ case token.Token:
+ switch x {
+ case token.ISA:
+ tok = x
+ case token.OPTION:
+ optional = token.Blank.Pos()
+ case token.COLON, token.ILLEGAL:
+ default:
+ panic(fmt.Sprintf("invalid token %s", x))
+ }
+ default:
+ panic(fmt.Sprintf("unsupported expression type %T", x))
+ }
+ }
+ if expr == nil {
+ panic("label not matched with expression")
+ }
+ s.Elts = append(s.Elts, &Field{
+ Label: label,
+ Optional: optional,
+ Token: tok,
+ Value: expr,
+ })
+ }
+ return s
+}
+
+// Embed can be used in conjunction with NewStruct to embed values.
+func Embed(x Expr) *embedding {
+ return (*embedding)(&EmbedDecl{Expr: x})
+}
+
+type embedding EmbedDecl
+
+// A ListLit node represents a literal list.
+type ListLit struct {
+ Lbrack token.Pos // position of "["
+
+ // TODO: change to embedding or similar.
+ Elts []Expr // list of composite elements; or nil
+ Rbrack token.Pos // position of "]"
+
+ comments
+ expr
+ label
+}
+
+// NewList creates a list of Expressions.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewList(exprs ...Expr) *ListLit {
+ return &ListLit{Elts: exprs}
+}
+
+type Ellipsis struct {
+ Ellipsis token.Pos // open list if set
+ Type Expr // type for the remaining elements
+
+ comments
+ decl
+ expr
+}
+
+// A ForClause node represents a for clause in a comprehension.
+type ForClause struct {
+ For token.Pos
+ Key *Ident // allow pattern matching?
+ // TODO: change to Comma
+ Colon token.Pos
+ Value *Ident // allow pattern matching?
+ In token.Pos
+ Source Expr
+
+ comments
+ clause
+}
+
+// An IfClause node represents an if guard clause in a comprehension.
+type IfClause struct {
+ If token.Pos
+ Condition Expr
+
+ comments
+ clause
+}
+
+// A LetClause node represents a let clause in a comprehension.
+type LetClause struct {
+ Let token.Pos
+ Ident *Ident
+ Equal token.Pos
+ Expr Expr
+
+ comments
+ clause
+ decl
+}
+
+// A ParenExpr node represents a parenthesized expression.
+type ParenExpr struct {
+ Lparen token.Pos // position of "("
+ X Expr // parenthesized expression
+ Rparen token.Pos // position of ")"
+
+ comments
+ expr
+ label
+}
+
+// A SelectorExpr node represents an expression followed by a selector.
+type SelectorExpr struct {
+ X Expr // expression
+ Sel Label // field selector
+
+ comments
+ expr
+}
+
+// NewSel creates a sequence of selectors.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewSel(x Expr, sel ...string) Expr {
+ for _, s := range sel {
+ x = &SelectorExpr{X: x, Sel: NewIdent(s)}
+ }
+ return x
+}
+
+// An IndexExpr node represents an expression followed by an index.
+type IndexExpr struct {
+ X Expr // expression
+ Lbrack token.Pos // position of "["
+ Index Expr // index expression
+ Rbrack token.Pos // position of "]"
+
+ comments
+ expr
+}
+
+// A SliceExpr node represents an expression followed by slice indices.
+type SliceExpr struct {
+ X Expr // expression
+ Lbrack token.Pos // position of "["
+ Low Expr // begin of slice range; or nil
+ High Expr // end of slice range; or nil
+ Rbrack token.Pos // position of "]"
+
+ comments
+ expr
+}
+
+// A CallExpr node represents an expression followed by an argument list.
+type CallExpr struct {
+ Fun Expr // function expression
+ Lparen token.Pos // position of "("
+ Args []Expr // function arguments; or nil
+ Rparen token.Pos // position of ")"
+
+ comments
+ expr
+}
+
+// NewCall creates a new CallExpr.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewCall(fun Expr, args ...Expr) *CallExpr {
+ return &CallExpr{Fun: fun, Args: args}
+}
+
+// A UnaryExpr node represents a unary expression.
+type UnaryExpr struct {
+ OpPos token.Pos // position of Op
+ Op token.Token // operator
+ X Expr // operand
+
+ comments
+ expr
+}
+
+// A BinaryExpr node represents a binary expression.
+type BinaryExpr struct {
+ X Expr // left operand
+ OpPos token.Pos // position of Op
+ Op token.Token // operator
+ Y Expr // right operand
+
+ comments
+ expr
+}
+
+// NewBinExpr creates for list of expressions of length 2 or greater a chained
+// binary expression of the form (((x1 op x2) op x3) ...). For lists of length
+// 1 it returns the expression itself. It panics for empty lists.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewBinExpr(op token.Token, operands ...Expr) Expr {
+ if len(operands) == 0 {
+ return nil
+ }
+ expr := operands[0]
+ for _, e := range operands[1:] {
+ expr = &BinaryExpr{X: expr, Op: op, Y: e}
+ }
+ return expr
+}
+
+// token.Pos and End implementations for expression/type nodes.
+
+func (x *BadExpr) Pos() token.Pos { return x.From }
+func (x *BadExpr) pos() *token.Pos { return &x.From }
+func (x *Ident) Pos() token.Pos { return x.NamePos }
+func (x *Ident) pos() *token.Pos { return &x.NamePos }
+func (x *BasicLit) Pos() token.Pos { return x.ValuePos }
+func (x *BasicLit) pos() *token.Pos { return &x.ValuePos }
+func (x *Interpolation) Pos() token.Pos { return x.Elts[0].Pos() }
+func (x *Interpolation) pos() *token.Pos { return x.Elts[0].pos() }
+func (x *StructLit) Pos() token.Pos { return getPos(x) }
+func (x *StructLit) pos() *token.Pos {
+ if x.Lbrace == token.NoPos && len(x.Elts) > 0 {
+ return x.Elts[0].pos()
+ }
+ return &x.Lbrace
+}
+
+func (x *ListLit) Pos() token.Pos { return x.Lbrack }
+func (x *ListLit) pos() *token.Pos { return &x.Lbrack }
+func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis }
+func (x *Ellipsis) pos() *token.Pos { return &x.Ellipsis }
+func (x *LetClause) Pos() token.Pos { return x.Let }
+func (x *LetClause) pos() *token.Pos { return &x.Let }
+func (x *ForClause) Pos() token.Pos { return x.For }
+func (x *ForClause) pos() *token.Pos { return &x.For }
+func (x *IfClause) Pos() token.Pos { return x.If }
+func (x *IfClause) pos() *token.Pos { return &x.If }
+func (x *ParenExpr) Pos() token.Pos { return x.Lparen }
+func (x *ParenExpr) pos() *token.Pos { return &x.Lparen }
+func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *SelectorExpr) pos() *token.Pos { return x.X.pos() }
+func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *IndexExpr) pos() *token.Pos { return x.X.pos() }
+func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *SliceExpr) pos() *token.Pos { return x.X.pos() }
+func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() }
+func (x *CallExpr) pos() *token.Pos { return x.Fun.pos() }
+func (x *UnaryExpr) Pos() token.Pos { return x.OpPos }
+func (x *UnaryExpr) pos() *token.Pos { return &x.OpPos }
+func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *BinaryExpr) pos() *token.Pos { return x.X.pos() }
+func (x *BottomLit) Pos() token.Pos { return x.Bottom }
+func (x *BottomLit) pos() *token.Pos { return &x.Bottom }
+
+func (x *BadExpr) End() token.Pos { return x.To }
+func (x *Ident) End() token.Pos {
+ return x.NamePos.Add(len(x.Name))
+}
+func (x *BasicLit) End() token.Pos { return x.ValuePos.Add(len(x.Value)) }
+
+func (x *Interpolation) End() token.Pos { return x.Elts[len(x.Elts)-1].Pos() }
+func (x *StructLit) End() token.Pos {
+ if x.Rbrace == token.NoPos && len(x.Elts) > 0 {
+ return x.Elts[len(x.Elts)-1].Pos()
+ }
+ return x.Rbrace.Add(1)
+}
+func (x *ListLit) End() token.Pos { return x.Rbrack.Add(1) }
+func (x *Ellipsis) End() token.Pos {
+ if x.Type != nil {
+ return x.Type.End()
+ }
+ return x.Ellipsis.Add(3) // len("...")
+}
+func (x *LetClause) End() token.Pos { return x.Expr.End() }
+func (x *ForClause) End() token.Pos { return x.Source.End() }
+func (x *IfClause) End() token.Pos { return x.Condition.End() }
+func (x *ParenExpr) End() token.Pos { return x.Rparen.Add(1) }
+func (x *SelectorExpr) End() token.Pos { return x.Sel.End() }
+func (x *IndexExpr) End() token.Pos { return x.Rbrack.Add(1) }
+func (x *SliceExpr) End() token.Pos { return x.Rbrack.Add(1) }
+func (x *CallExpr) End() token.Pos { return x.Rparen.Add(1) }
+func (x *UnaryExpr) End() token.Pos { return x.X.End() }
+func (x *BinaryExpr) End() token.Pos { return x.Y.End() }
+func (x *BottomLit) End() token.Pos { return x.Bottom.Add(1) }
+
+// ----------------------------------------------------------------------------
+// Convenience functions for Idents
+
+// NewIdent creates a new Ident without position.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewIdent(name string) *Ident {
+ return &Ident{token.NoPos, name, nil, nil, comments{}, label{}, expr{}}
+}
+
+func (id *Ident) String() string {
+ if id != nil {
+ return id.Name
+ }
+ return ""
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// An ImportSpec node represents a single package import.
+type ImportSpec struct {
+ Name *Ident // local package name (including "."); or nil
+ Path *BasicLit // import path
+ EndPos token.Pos // end of spec (overrides Path.Pos if nonzero)
+
+ comments
+}
+
+func (*ImportSpec) specNode() {}
+
+func NewImport(name *Ident, importPath string) *ImportSpec {
+ importPath = literal.String.Quote(importPath)
+ path := &BasicLit{Kind: token.STRING, Value: importPath}
+ return &ImportSpec{Name: name, Path: path}
+}
+
+// Pos and End implementations for spec nodes.
+
+func (s *ImportSpec) Pos() token.Pos { return getPos(s) }
+func (s *ImportSpec) pos() *token.Pos {
+ if s.Name != nil {
+ return s.Name.pos()
+ }
+ return s.Path.pos()
+}
+
+// func (s *AliasSpec) Pos() token.Pos { return s.Name.Pos() }
+// func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
+// func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() }
+
+func (s *ImportSpec) End() token.Pos {
+ if s.EndPos != token.NoPos {
+ return s.EndPos
+ }
+ return s.Path.End()
+}
+
+// A BadDecl node is a placeholder for declarations containing
+// syntax errors for which no correct declaration nodes can be
+// created.
+type BadDecl struct {
+ From, To token.Pos // position range of bad declaration
+
+ comments
+ decl
+}
+
+// An ImportDecl node represents a series of import declarations. A valid
+// Lparen position (Lparen.Line > 0) indicates a parenthesized declaration.
+type ImportDecl struct {
+ Import token.Pos
+ Lparen token.Pos // position of '(', if any
+ Specs []*ImportSpec
+ Rparen token.Pos // position of ')', if any
+
+ comments
+ decl
+}
+
+type Spec interface {
+ Node
+ specNode()
+}
+
+// An EmbedDecl node represents a single expression used as a declaration.
+// The expressions in this declaration is what will be emitted as
+// configuration output.
+//
+// An EmbedDecl may only appear at the top level.
+type EmbedDecl struct {
+ Expr Expr
+
+ comments
+ decl
+}
+
+// Pos and End implementations for declaration nodes.
+
+func (d *BadDecl) Pos() token.Pos { return d.From }
+func (d *BadDecl) pos() *token.Pos { return &d.From }
+func (d *ImportDecl) Pos() token.Pos { return d.Import }
+func (d *ImportDecl) pos() *token.Pos { return &d.Import }
+func (d *EmbedDecl) Pos() token.Pos { return d.Expr.Pos() }
+func (d *EmbedDecl) pos() *token.Pos { return d.Expr.pos() }
+
+func (d *BadDecl) End() token.Pos { return d.To }
+func (d *ImportDecl) End() token.Pos {
+ if d.Rparen.IsValid() {
+ return d.Rparen.Add(1)
+ }
+ if len(d.Specs) == 0 {
+ return token.NoPos
+ }
+ return d.Specs[0].End()
+}
+func (d *EmbedDecl) End() token.Pos { return d.Expr.End() }
+
+// ----------------------------------------------------------------------------
+// Files and packages
+
+// A File node represents a CUE source file.
+//
+// The Comments list contains all comments in the source file in order of
+// appearance, including the comments that are pointed to from other nodes
+// via Doc and Comment fields.
+type File struct {
+ Filename string
+ Decls []Decl // top-level declarations; or nil
+
+ Imports []*ImportSpec // imports in this file
+ Unresolved []*Ident // unresolved identifiers in this file
+
+ comments
+}
+
+// Preamble returns the declarations of the preamble.
+func (f *File) Preamble() []Decl {
+ p := 0
+outer:
+ for i, d := range f.Decls {
+ switch d.(type) {
+ default:
+ break outer
+
+ case *Package:
+ p = i + 1
+ case *CommentGroup:
+ case *Attribute:
+ case *ImportDecl:
+ p = i + 1
+ }
+ }
+ return f.Decls[:p]
+}
+
+func (f *File) VisitImports(fn func(d *ImportDecl)) {
+ for _, d := range f.Decls {
+ switch x := d.(type) {
+ case *CommentGroup:
+ case *Package:
+ case *Attribute:
+ case *ImportDecl:
+ fn(x)
+ default:
+ return
+ }
+ }
+}
+
+// PackageName returns the package name associated with this file or "" if no
+// package is associated.
+func (f *File) PackageName() string {
+ for _, d := range f.Decls {
+ switch x := d.(type) {
+ case *Package:
+ return x.Name.Name
+ case *CommentGroup, *Attribute:
+ default:
+ return ""
+ }
+ }
+ return ""
+}
+
+func (f *File) Pos() token.Pos {
+ if len(f.Decls) > 0 {
+ return f.Decls[0].Pos()
+ }
+ if f.Filename != "" {
+ // TODO. Do something more principled and efficient.
+ return token.NewFile(f.Filename, -1, 1).Pos(0, 0)
+ }
+ return token.NoPos
+}
+
+func (f *File) pos() *token.Pos {
+ if len(f.Decls) > 0 {
+ return f.Decls[0].pos()
+ }
+ if f.Filename != "" {
+ return nil
+ }
+ return nil
+}
+
+func (f *File) End() token.Pos {
+ if n := len(f.Decls); n > 0 {
+ return f.Decls[n-1].End()
+ }
+ return token.NoPos
+}
+
+// A Package represents a package clause.
+type Package struct {
+ PackagePos token.Pos // position of "package" pseudo-keyword
+ Name *Ident // package name
+
+ comments
+ decl
+}
+
+func (p *Package) Pos() token.Pos { return getPos(p) }
+func (p *Package) pos() *token.Pos {
+ if p.PackagePos != token.NoPos {
+ return &p.PackagePos
+ }
+ if p.Name != nil {
+ return p.Name.pos()
+ }
+ return nil
+}
+
+func (p *Package) End() token.Pos {
+ if p.Name != nil {
+ return p.Name.End()
+ }
+ return token.NoPos
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/apply.go b/vendor/cuelang.org/go/cue/ast/astutil/apply.go
new file mode 100644
index 0000000000..22d12c60f3
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/apply.go
@@ -0,0 +1,512 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astutil
+
+import (
+ "encoding/hex"
+ "fmt"
+ "hash/fnv"
+ "reflect"
+
+ "cuelang.org/go/cue/ast"
+)
+
+// A Cursor describes a node encountered during Apply.
+// Information about the node and its parent is available
+// from the Node, Parent, and Index methods.
+//
+// The methods Replace, Delete, InsertBefore, and InsertAfter
+// can be used to change the AST without disrupting Apply.
+// Delete, InsertBefore, and InsertAfter are only defined for modifying
+// a StructLit and will panic in any other context.
+type Cursor interface {
+ // Node returns the current Node.
+ Node() ast.Node
+
+ // Parent returns the parent of the current Node.
+ Parent() Cursor
+
+ // Index reports the index >= 0 of the current Node in the slice of Nodes
+ // that contains it, or a value < 0 if the current Node is not part of a
+ // list.
+ Index() int
+
+ // Import reports an opaque identifier that refers to the given package. It
+ // may only be called if the input to apply was an ast.File. If the import
+ // does not exist, it will be added.
+ Import(path string) *ast.Ident
+
+ // Replace replaces the current Node with n.
+ // The replacement node is not walked by Apply. Comments of the old node
+ // are copied to the new node if it does not yet have comments associated
+ // with it.
+ Replace(n ast.Node)
+
+ // Delete deletes the current Node from its containing struct.
+ // If the current Node is not part of a struct, Delete panics.
+ Delete()
+
+ // InsertAfter inserts n after the current Node in its containing struct.
+ // If the current Node is not part of a struct, InsertAfter panics.
+ // Unless n is wrapped by ApplyRecursively, Apply does not walk n.
+ InsertAfter(n ast.Node)
+
+ // InsertBefore inserts n before the current Node in its containing struct.
+ // If the current Node is not part of a struct, InsertBefore panics.
+ // Unless n is wrapped by ApplyRecursively, Apply does not walk n.
+ InsertBefore(n ast.Node)
+
+ self() *cursor
+}
+
+// ApplyRecursively indicates that a node inserted with InsertBefore,
+// or InsertAfter should be processed recursively.
+func ApplyRecursively(n ast.Node) ast.Node {
+ return recursive{n}
+}
+
+type recursive struct {
+ ast.Node
+}
+
+type info struct {
+ f *ast.File
+ current *declsCursor
+
+ importPatch []*ast.Ident
+}
+
+type cursor struct {
+ file *info
+ parent Cursor
+ node ast.Node
+ typ interface{} // the type of the node
+ index int // position of any of the sub types.
+ replaced bool
+}
+
+func newCursor(parent Cursor, n ast.Node, typ interface{}) *cursor {
+ return &cursor{
+ parent: parent,
+ typ: typ,
+ node: n,
+ index: -1,
+ }
+}
+
+func fileInfo(c Cursor) (info *info) {
+ for ; c != nil; c = c.Parent() {
+ if i := c.self().file; i != nil {
+ return i
+ }
+ }
+ return nil
+}
+
+func (c *cursor) self() *cursor { return c }
+func (c *cursor) Parent() Cursor { return c.parent }
+func (c *cursor) Index() int { return c.index }
+func (c *cursor) Node() ast.Node { return c.node }
+
+func (c *cursor) Import(importPath string) *ast.Ident {
+ info := fileInfo(c)
+ if info == nil {
+ return nil
+ }
+
+ name := ImportPathName(importPath)
+
+ // TODO: come up with something much better.
+ // For instance, hoist the uniquer from cue/export.go to
+ // here and make export.go use this.
+ hash := fnv.New32()
+ name += hex.EncodeToString(hash.Sum([]byte(importPath)))[:6]
+
+ spec := insertImport(&info.current.decls, &ast.ImportSpec{
+ Name: ast.NewIdent(name),
+ Path: ast.NewString(importPath),
+ })
+
+ ident := &ast.Ident{Node: spec} // Name is set later.
+ info.importPatch = append(info.importPatch, ident)
+
+ ident.Name = name
+
+ return ident
+}
+
+func (c *cursor) Replace(n ast.Node) {
+ // panic if the value cannot convert to the original type.
+ reflect.ValueOf(n).Convert(reflect.TypeOf(c.typ).Elem())
+ if ast.Comments(n) != nil {
+ CopyComments(n, c.node)
+ }
+ if r, ok := n.(recursive); ok {
+ n = r.Node
+ } else {
+ c.replaced = true
+ }
+ c.node = n
+}
+
+func (c *cursor) InsertAfter(n ast.Node) { panic("unsupported") }
+func (c *cursor) InsertBefore(n ast.Node) { panic("unsupported") }
+func (c *cursor) Delete() { panic("unsupported") }
+
+// Apply traverses a syntax tree recursively, starting with root,
+// and calling pre and post for each node as described below.
+// Apply returns the syntax tree, possibly modified.
+//
+// If pre is not nil, it is called for each node before the node's
+// children are traversed (pre-order). If pre returns false, no
+// children are traversed, and post is not called for that node.
+//
+// If post is not nil, and a prior call of pre didn't return false,
+// post is called for each node after its children are traversed
+// (post-order). If post returns false, traversal is terminated and
+// Apply returns immediately.
+//
+// Only fields that refer to AST nodes are considered children;
+// i.e., token.Pos, Scopes, Objects, and fields of basic types
+// (strings, etc.) are ignored.
+//
+// Children are traversed in the order in which they appear in the
+// respective node's struct definition.
+//
+func Apply(node ast.Node, before, after func(Cursor) bool) ast.Node {
+ apply(&applier{before: before, after: after}, nil, &node)
+ return node
+}
+
+// An applyVisitor's before method is invoked for each node encountered by Walk.
+// If the result applyVisitor w is true, Walk visits each of the children
+// of node with the applyVisitor w, followed by a call of w.After.
+type applyVisitor interface {
+ Before(Cursor) applyVisitor
+ After(Cursor) bool
+}
+
+// Helper functions for common node lists. They may be empty.
+
+func applyExprList(v applyVisitor, parent Cursor, ptr interface{}, list []ast.Expr) {
+ c := newCursor(parent, nil, nil)
+ for i, x := range list {
+ c.index = i
+ c.node = x
+ c.typ = &list[i]
+ applyCursor(v, c)
+ if x != c.node {
+ list[i] = c.node.(ast.Expr)
+ }
+ }
+}
+
+type declsCursor struct {
+ *cursor
+ decls, after, process []ast.Decl
+ delete bool
+}
+
+func (c *declsCursor) InsertAfter(n ast.Node) {
+ if r, ok := n.(recursive); ok {
+ n = r.Node
+ c.process = append(c.process, n.(ast.Decl))
+ }
+ c.after = append(c.after, n.(ast.Decl))
+}
+
+func (c *declsCursor) InsertBefore(n ast.Node) {
+ if r, ok := n.(recursive); ok {
+ n = r.Node
+ c.process = append(c.process, n.(ast.Decl))
+ }
+ c.decls = append(c.decls, n.(ast.Decl))
+}
+
+func (c *declsCursor) Delete() { c.delete = true }
+
+func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl {
+ c := &declsCursor{
+ cursor: newCursor(parent, nil, nil),
+ decls: make([]ast.Decl, 0, len(list)),
+ }
+ if file, ok := parent.Node().(*ast.File); ok {
+ c.cursor.file = &info{f: file, current: c}
+ }
+ for i, x := range list {
+ c.node = x
+ c.typ = &list[i]
+ applyCursor(v, c)
+ if !c.delete {
+ c.decls = append(c.decls, c.node.(ast.Decl))
+ }
+ c.delete = false
+ for i := 0; i < len(c.process); i++ {
+ x := c.process[i]
+ c.node = x
+ c.typ = &c.process[i]
+ applyCursor(v, c)
+ if c.delete {
+ panic("cannot delete a node that was added with InsertBefore or InsertAfter")
+ }
+ }
+ c.decls = append(c.decls, c.after...)
+ c.after = c.after[:0]
+ c.process = c.process[:0]
+ }
+
+ // TODO: ultimately, programmatically linked nodes have to be resolved
+ // at the end.
+ // if info := c.cursor.file; info != nil {
+ // done := map[*ast.ImportSpec]bool{}
+ // for _, ident := range info.importPatch {
+ // spec := ident.Node.(*ast.ImportSpec)
+ // if done[spec] {
+ // continue
+ // }
+ // done[spec] = true
+
+ // path, _ := strconv.Unquote(spec.Path)
+
+ // ident.Name =
+ // }
+ // }
+
+ return c.decls
+}
+
+func apply(v applyVisitor, parent Cursor, nodePtr interface{}) {
+ res := reflect.Indirect(reflect.ValueOf(nodePtr))
+ n := res.Interface()
+ node := n.(ast.Node)
+ c := newCursor(parent, node, nodePtr)
+ applyCursor(v, c)
+ if node != c.node {
+ res.Set(reflect.ValueOf(c.node))
+ }
+}
+
+// applyCursor traverses an AST in depth-first order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, apply is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
+func applyCursor(v applyVisitor, c Cursor) {
+ if v = v.Before(c); v == nil {
+ return
+ }
+
+ node := c.Node()
+
+ // TODO: record the comment groups and interleave with the values like for
+ // parsing and printing?
+ comments := node.Comments()
+ for _, cm := range comments {
+ apply(v, c, &cm)
+ }
+
+ // apply children
+ // (the order of the cases matches the order
+ // of the corresponding node types in go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ for _, cg := range n.List {
+ apply(v, c, &cg)
+ }
+
+ case *ast.Attribute:
+ // nothing to do
+
+ case *ast.Field:
+ apply(v, c, &n.Label)
+ if n.Value != nil {
+ apply(v, c, &n.Value)
+ }
+ for _, a := range n.Attrs {
+ apply(v, c, &a)
+ }
+
+ case *ast.StructLit:
+ n.Elts = applyDeclList(v, c, n.Elts)
+
+ // Expressions
+ case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Interpolation:
+ applyExprList(v, c, &n, n.Elts)
+
+ case *ast.ListLit:
+ applyExprList(v, c, &n, n.Elts)
+
+ case *ast.Ellipsis:
+ if n.Type != nil {
+ apply(v, c, &n.Type)
+ }
+
+ case *ast.ParenExpr:
+ apply(v, c, &n.X)
+
+ case *ast.SelectorExpr:
+ apply(v, c, &n.X)
+ apply(v, c, &n.Sel)
+
+ case *ast.IndexExpr:
+ apply(v, c, &n.X)
+ apply(v, c, &n.Index)
+
+ case *ast.SliceExpr:
+ apply(v, c, &n.X)
+ if n.Low != nil {
+ apply(v, c, &n.Low)
+ }
+ if n.High != nil {
+ apply(v, c, &n.High)
+ }
+
+ case *ast.CallExpr:
+ apply(v, c, &n.Fun)
+ applyExprList(v, c, &n, n.Args)
+
+ case *ast.UnaryExpr:
+ apply(v, c, &n.X)
+
+ case *ast.BinaryExpr:
+ apply(v, c, &n.X)
+ apply(v, c, &n.Y)
+
+ // Declarations
+ case *ast.ImportSpec:
+ if n.Name != nil {
+ apply(v, c, &n.Name)
+ }
+ apply(v, c, &n.Path)
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.ImportDecl:
+ for _, s := range n.Specs {
+ apply(v, c, &s)
+ }
+
+ case *ast.EmbedDecl:
+ apply(v, c, &n.Expr)
+
+ case *ast.LetClause:
+ apply(v, c, &n.Ident)
+ apply(v, c, &n.Expr)
+
+ case *ast.Alias:
+ apply(v, c, &n.Ident)
+ apply(v, c, &n.Expr)
+
+ case *ast.Comprehension:
+ clauses := n.Clauses
+ for i := range n.Clauses {
+ apply(v, c, &clauses[i])
+ }
+ apply(v, c, &n.Value)
+
+ // Files and packages
+ case *ast.File:
+ n.Decls = applyDeclList(v, c, n.Decls)
+
+ case *ast.Package:
+ apply(v, c, &n.Name)
+
+ case *ast.ForClause:
+ if n.Key != nil {
+ apply(v, c, &n.Key)
+ }
+ apply(v, c, &n.Value)
+ apply(v, c, &n.Source)
+
+ case *ast.IfClause:
+ apply(v, c, &n.Condition)
+
+ default:
+ panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+ }
+
+ v.After(c)
+}
+
+type applier struct {
+ before func(Cursor) bool
+ after func(Cursor) bool
+
+ commentStack []commentFrame
+ current commentFrame
+}
+
+type commentFrame struct {
+ cg []*ast.CommentGroup
+ pos int8
+}
+
+func (f *applier) Before(c Cursor) applyVisitor {
+ node := c.Node()
+ if f.before == nil || (f.before(c) && node == c.Node()) {
+ f.commentStack = append(f.commentStack, f.current)
+ f.current = commentFrame{cg: node.Comments()}
+ f.visitComments(c, f.current.pos)
+ return f
+ }
+ return nil
+}
+
+func (f *applier) After(c Cursor) bool {
+ f.visitComments(c, 127)
+ p := len(f.commentStack) - 1
+ f.current = f.commentStack[p]
+ f.commentStack = f.commentStack[:p]
+ f.current.pos++
+ if f.after != nil {
+ f.after(c)
+ }
+ return true
+}
+
+func (f *applier) visitComments(p Cursor, pos int8) {
+ c := &f.current
+ for i := 0; i < len(c.cg); i++ {
+ cg := c.cg[i]
+ if cg.Position == pos {
+ continue
+ }
+ cursor := newCursor(p, cg, cg)
+ if f.before == nil || (f.before(cursor) && !cursor.replaced) {
+ for j, c := range cg.List {
+ cursor := newCursor(p, c, &c)
+ if f.before == nil || (f.before(cursor) && !cursor.replaced) {
+ if f.after != nil {
+ f.after(cursor)
+ }
+ }
+ cg.List[j] = cursor.node.(*ast.Comment)
+ }
+ if f.after != nil {
+ f.after(cursor)
+ }
+ }
+ c.cg[i] = cursor.node.(*ast.CommentGroup)
+ }
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/file.go b/vendor/cuelang.org/go/cue/ast/astutil/file.go
new file mode 100644
index 0000000000..e060b71194
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/file.go
@@ -0,0 +1,38 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astutil
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+// ToFile converts an expression to a File. It will create an import section for
+// any of the identifiers in x that refer to an import and will unshadow
+// references as appropriate.
+func ToFile(x ast.Expr) (*ast.File, error) {
+ var f *ast.File
+ if st, ok := x.(*ast.StructLit); ok {
+ f = &ast.File{Decls: st.Elts}
+ } else {
+ ast.SetRelPos(x, token.NoSpace)
+ f = &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: x}}}
+ }
+
+ if err := Sanitize(f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go
new file mode 100644
index 0000000000..5043e16b20
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go
@@ -0,0 +1,461 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements scopes and the objects they contain.
+
+package astutil
+
+import (
+ "bytes"
+ "fmt"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+// An ErrFunc processes errors.
+type ErrFunc func(pos token.Pos, msg string, args ...interface{})
+
+// TODO: future development
+//
+// Resolution currently assigns values along the table below. This is based on
+// Go's resolver and is not quite convenient for CUE's purposes. For one, CUE
+// allows manually setting resolution and than call astutil.Sanitize to
+// normalize the ast.File. Manually assigning resolutions according to the
+// below table is rather tedious though.
+//
+// Instead of using the Scope and Node fields in identifiers, we suggest the
+// following assignments:
+//
+// Reference Node // an Decl or Clause
+// Ident *Ident // The identifier in References (optional)
+//
+// References always refers to the direct element in the scope in which the
+// identifier occurs, not the final value, so: *Field, *LetClause, *ForClause,
+// etc. In case Ident is defined, it must be the same pointer as the
+// referencing identifier. In case it is not defined, the Name of the
+// referencing identifier can be used to locate the proper identifier in the
+// referenced node.
+//
+// The Scope field in the original design then loses its function.
+//
+// Type of reference Scope Node
+// Let Clause File/Struct LetClause
+// Alias declaration File/Struct Alias (deprecated)
+// Illegal Reference File/Struct
+// Value
+// X in a: X=y Field Alias
+// Fields
+// X in X: y File/Struct Expr (y)
+// X in X=x: y File/Struct Field
+// X in X=(x): y File/Struct Field
+// X in X="\(x)": y File/Struct Field
+// X in [X=x]: y Field Expr (x)
+// X in X=[x]: y Field Field
+//
+// for k, v in ForClause Ident
+// let x = y LetClause Ident
+//
+// Fields inside lambda
+// Label Field Expr
+// Value Field Field
+// Pkg nil ImportSpec
+
+// Resolve resolves all identifiers in a file. Unresolved identifiers are
+// recorded in Unresolved. It will not overwrite already resolved values.
+func Resolve(f *ast.File, errFn ErrFunc) {
+ walk(&scope{errFn: errFn, identFn: resolveIdent}, f)
+}
+
+// Resolve resolves all identifiers in an expression.
+// It will not overwrite already resolved values.
+func ResolveExpr(e ast.Expr, errFn ErrFunc) {
+ f := &ast.File{}
+ walk(&scope{file: f, errFn: errFn, identFn: resolveIdent}, e)
+}
+
+// A Scope maintains the set of named language entities declared
+// in the scope and a link to the immediately surrounding (outer)
+// scope.
+//
+type scope struct {
+ file *ast.File
+ outer *scope
+ node ast.Node
+ index map[string]entry
+ inField bool
+
+ identFn func(s *scope, n *ast.Ident) bool
+ nameFn func(name string)
+ errFn func(p token.Pos, msg string, args ...interface{})
+}
+
+type entry struct {
+ node ast.Node
+ link ast.Node // Alias, LetClause, or Field
+}
+
+func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope {
+ const n = 4 // initial scope capacity
+ s := &scope{
+ file: f,
+ outer: outer,
+ node: node,
+ index: make(map[string]entry, n),
+ identFn: outer.identFn,
+ nameFn: outer.nameFn,
+ errFn: outer.errFn,
+ }
+ for _, d := range decls {
+ switch x := d.(type) {
+ case *ast.Field:
+ label := x.Label
+
+ if a, ok := x.Label.(*ast.Alias); ok {
+ // TODO(legacy): use name := a.Ident.Name once quoted
+ // identifiers are no longer supported.
+ label, _ = a.Expr.(ast.Label)
+ if name, _, _ := ast.LabelName(a.Ident); name != "" {
+ if _, ok := label.(*ast.ListLit); !ok {
+ s.insert(name, x, a)
+ }
+ }
+ }
+
+ // default:
+ name, isIdent, _ := ast.LabelName(label)
+ if isIdent {
+ v := x.Value
+ // Avoid interpreting value aliases at this point.
+ if a, ok := v.(*ast.Alias); ok {
+ v = a.Expr
+ }
+ s.insert(name, v, x)
+ }
+ case *ast.LetClause:
+ name, isIdent, _ := ast.LabelName(x.Ident)
+ if isIdent {
+ s.insert(name, x, x)
+ }
+ case *ast.Alias:
+ name, isIdent, _ := ast.LabelName(x.Ident)
+ if isIdent {
+ s.insert(name, x, x)
+ }
+ case *ast.ImportDecl:
+ for _, spec := range x.Specs {
+ info, _ := ParseImportSpec(spec)
+ s.insert(info.Ident, spec, spec)
+ }
+ }
+ }
+ return s
+}
+
+func (s *scope) isLet(n ast.Node) bool {
+ if _, ok := s.node.(*ast.Field); ok {
+ return true
+ }
+ switch n.(type) {
+ case *ast.LetClause, *ast.Alias, *ast.Field:
+ return true
+ }
+ return false
+}
+
+func (s *scope) mustBeUnique(n ast.Node) bool {
+ if _, ok := s.node.(*ast.Field); ok {
+ return true
+ }
+ switch n.(type) {
+ // TODO: add *ast.ImportSpec when some implementations are moved over to
+ // Sanitize.
+ case *ast.ImportSpec, *ast.LetClause, *ast.Alias, *ast.Field:
+ return true
+ }
+ return false
+}
+
+func (s *scope) insert(name string, n, link ast.Node) {
+ if name == "" {
+ return
+ }
+ if s.nameFn != nil {
+ s.nameFn(name)
+ }
+ // TODO: record both positions.
+ if outer, _, existing := s.lookup(name); existing.node != nil {
+ if s.isLet(n) != outer.isLet(existing.node) {
+ s.errFn(n.Pos(), "cannot have both alias and field with name %q in same scope", name)
+ return
+ } else if s.mustBeUnique(n) || outer.mustBeUnique(existing.node) {
+ if outer == s {
+ if _, ok := existing.node.(*ast.ImportSpec); ok {
+ return
+ // TODO:
+ s.errFn(n.Pos(), "conflicting declaration %s\n"+
+ "\tprevious declaration at %s",
+ name, existing.node.Pos())
+ } else {
+ s.errFn(n.Pos(), "alias %q redeclared in same scope", name)
+ }
+ return
+ }
+ // TODO: Should we disallow shadowing of aliases?
+ // This was the case, but it complicates the transition to
+ // square brackets. The spec says allow it.
+ // s.errFn(n.Pos(), "alias %q already declared in enclosing scope", name)
+ }
+ }
+ s.index[name] = entry{node: n, link: link}
+}
+
+func (s *scope) resolveScope(name string, node ast.Node) (scope ast.Node, e entry, ok bool) {
+ last := s
+ for s != nil {
+ if n, ok := s.index[name]; ok && node == n.node {
+ if last.node == n.node {
+ return nil, n, true
+ }
+ return s.node, n, true
+ }
+ s, last = s.outer, s
+ }
+ return nil, entry{}, false
+}
+
+func (s *scope) lookup(name string) (p *scope, obj ast.Node, node entry) {
+ // TODO(#152): consider returning nil for obj if it is a reference to root.
+ // last := s
+ if name == "_" {
+ return nil, nil, entry{}
+ }
+ for s != nil {
+ if n, ok := s.index[name]; ok {
+ if _, ok := n.node.(*ast.ImportSpec); ok {
+ return s, nil, n
+ }
+ return s, s.node, n
+ }
+ // s, last = s.outer, s
+ s = s.outer
+ }
+ return nil, nil, entry{}
+}
+
+func (s *scope) After(n ast.Node) {}
+func (s *scope) Before(n ast.Node) (w visitor) {
+ switch x := n.(type) {
+ case *ast.File:
+ s := newScope(x, s, x, x.Decls)
+ // Support imports.
+ for _, d := range x.Decls {
+ walk(s, d)
+ }
+ return nil
+
+ case *ast.StructLit:
+ return newScope(s.file, s, x, x.Elts)
+
+ case *ast.Comprehension:
+ s = scopeClauses(s, x.Clauses)
+ walk(s, x.Value)
+ return nil
+
+ case *ast.Field:
+ var n ast.Node = x.Label
+ alias, ok := x.Label.(*ast.Alias)
+ if ok {
+ n = alias.Expr
+ }
+
+ switch label := n.(type) {
+ case *ast.ParenExpr:
+ walk(s, label)
+
+ case *ast.Interpolation:
+ walk(s, label)
+
+ case *ast.ListLit:
+ if len(label.Elts) != 1 {
+ break
+ }
+ s = newScope(s.file, s, x, nil)
+ if alias != nil {
+ if name, _, _ := ast.LabelName(alias.Ident); name != "" {
+ s.insert(name, x, alias)
+ }
+ }
+
+ expr := label.Elts[0]
+
+ if a, ok := expr.(*ast.Alias); ok {
+ expr = a.Expr
+
+ // Add to current scope, instead of the value's, and allow
+ // references to bind to these illegally.
+ // We need this kind of administration anyway to detect
+ // illegal name clashes, and it allows giving better error
+ // messages. This puts the burdon on clients of this library
+ // to detect illegal usage, though.
+ name, err := ast.ParseIdent(a.Ident)
+ if err == nil {
+ s.insert(name, a.Expr, a)
+ }
+ }
+
+ ast.Walk(expr, nil, func(n ast.Node) {
+ if x, ok := n.(*ast.Ident); ok {
+ for s := s; s != nil && !s.inField; s = s.outer {
+ if _, ok := s.index[x.Name]; ok {
+ s.errFn(n.Pos(),
+ "reference %q in label expression refers to field against which it would be matched", x.Name)
+ }
+ }
+ }
+ })
+ walk(s, expr)
+ }
+
+ if n := x.Value; n != nil {
+ if alias, ok := x.Value.(*ast.Alias); ok {
+ // TODO: this should move into Before once decl attributes
+ // have been fully deprecated and embed attributes are introduced.
+ s = newScope(s.file, s, x, nil)
+ s.insert(alias.Ident.Name, alias, x)
+ n = alias.Expr
+ }
+ s.inField = true
+ walk(s, n)
+ s.inField = false
+ }
+
+ return nil
+
+ case *ast.LetClause:
+ // Disallow referring to the current LHS name.
+ name := x.Ident.Name
+ saved := s.index[name]
+ delete(s.index, name) // The same name may still appear in another scope
+
+ if x.Expr != nil {
+ walk(s, x.Expr)
+ }
+ s.index[name] = saved
+ return nil
+
+ case *ast.Alias:
+ // Disallow referring to the current LHS name.
+ name := x.Ident.Name
+ saved := s.index[name]
+ delete(s.index, name) // The same name may still appear in another scope
+
+ if x.Expr != nil {
+ walk(s, x.Expr)
+ }
+ s.index[name] = saved
+ return nil
+
+ case *ast.ImportSpec:
+ return nil
+
+ case *ast.Attribute:
+ // TODO: tokenize attributes, resolve identifiers and store the ones
+ // that resolve in a list.
+
+ case *ast.SelectorExpr:
+ walk(s, x.X)
+ return nil
+
+ case *ast.Ident:
+ if s.identFn(s, x) {
+ return nil
+ }
+ }
+ return s
+}
+
+func resolveIdent(s *scope, x *ast.Ident) bool {
+ name, ok, _ := ast.LabelName(x)
+ if !ok {
+ // TODO: generate error
+ return false
+ }
+ if _, obj, node := s.lookup(name); node.node != nil {
+ switch {
+ case x.Node == nil:
+ x.Node = node.node
+ x.Scope = obj
+
+ case x.Node == node.node:
+ x.Scope = obj
+
+ default: // x.Node != node
+ scope, _, ok := s.resolveScope(name, x.Node)
+ if !ok {
+ s.file.Unresolved = append(s.file.Unresolved, x)
+ }
+ x.Scope = scope
+ }
+ } else {
+ s.file.Unresolved = append(s.file.Unresolved, x)
+ }
+ return true
+}
+
+func scopeClauses(s *scope, clauses []ast.Clause) *scope {
+ for _, c := range clauses {
+ switch x := c.(type) {
+ case *ast.ForClause:
+ walk(s, x.Source)
+ s = newScope(s.file, s, x, nil)
+ if x.Key != nil {
+ name, err := ast.ParseIdent(x.Key)
+ if err == nil {
+ s.insert(name, x.Key, x)
+ }
+ }
+ name, err := ast.ParseIdent(x.Value)
+ if err == nil {
+ s.insert(name, x.Value, x)
+ }
+
+ case *ast.LetClause:
+ walk(s, x.Expr)
+ s = newScope(s.file, s, x, nil)
+ name, err := ast.ParseIdent(x.Ident)
+ if err == nil {
+ s.insert(name, x.Ident, x)
+ }
+
+ default:
+ walk(s, c)
+ }
+ }
+ return s
+}
+
+// Debugging support
+func (s *scope) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "scope %p {", s)
+ if s != nil && len(s.index) > 0 {
+ fmt.Fprintln(&buf)
+ for name := range s.index {
+ fmt.Fprintf(&buf, "\t%v\n", name)
+ }
+ }
+ fmt.Fprintf(&buf, "}\n")
+ return buf.String()
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go
new file mode 100644
index 0000000000..061a46b6f1
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go
@@ -0,0 +1,354 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astutil
+
+import (
+ "fmt"
+ "math/rand"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// TODO:
+// - handle comprehensions
+// - change field from foo to "foo" if it isn't referenced, rather than
+// relying on introducing a unique alias.
+// - change a predeclared identifier reference to use the __ident form,
+// instead of introducing an alias.
+
+// Sanitize rewrites File f in place to be well formed after automated
+// construction of an AST.
+//
+// Rewrites:
+// - auto inserts imports associated with Idents
+// - unshadows imports associated with idents
+// - unshadows references for identifiers that were already resolved.
+//
+func Sanitize(f *ast.File) error {
+ z := &sanitizer{
+ file: f,
+ rand: rand.New(rand.NewSource(808)),
+
+ names: map[string]bool{},
+ importMap: map[string]*ast.ImportSpec{},
+ referenced: map[ast.Node]bool{},
+ altMap: map[ast.Node]string{},
+ }
+
+ // Gather all names.
+ walk(&scope{
+ errFn: z.errf,
+ nameFn: z.addName,
+ identFn: z.markUsed,
+ }, f)
+ if z.errs != nil {
+ return z.errs
+ }
+
+ // Add imports and unshadow.
+ s := &scope{
+ file: f,
+ errFn: z.errf,
+ identFn: z.handleIdent,
+ index: make(map[string]entry),
+ }
+ z.fileScope = s
+ walk(s, f)
+ if z.errs != nil {
+ return z.errs
+ }
+
+ z.cleanImports()
+
+ return z.errs
+}
+
+type sanitizer struct {
+ file *ast.File
+ fileScope *scope
+
+ rand *rand.Rand
+
+ // names is all used names. Can be used to determine a new unique name.
+ names map[string]bool
+ referenced map[ast.Node]bool
+
+ // altMap defines an alternative name for an existing entry link (a field,
+ // alias or let clause). As new names are globally unique, they can be
+ // safely reused for any unshadowing.
+ altMap map[ast.Node]string
+ importMap map[string]*ast.ImportSpec
+
+ errs errors.Error
+}
+
+func (z *sanitizer) errf(p token.Pos, msg string, args ...interface{}) {
+ z.errs = errors.Append(z.errs, errors.Newf(p, msg, args...))
+}
+
+func (z *sanitizer) addName(name string) {
+ z.names[name] = true
+}
+
+func (z *sanitizer) addRename(base string, n ast.Node) (alt string, new bool) {
+ if name, ok := z.altMap[n]; ok {
+ return name, false
+ }
+
+ name := z.uniqueName(base, false)
+ z.altMap[n] = name
+ return name, true
+}
+
+func (z *sanitizer) unshadow(parent ast.Node, base string, link ast.Node) string {
+ name, ok := z.altMap[link]
+ if !ok {
+ name = z.uniqueName(base, false)
+ z.altMap[link] = name
+
+ // Insert new let clause at top to refer to a declaration in possible
+ // other files.
+ let := &ast.LetClause{
+ Ident: ast.NewIdent(name),
+ Expr: ast.NewIdent(base),
+ }
+
+ var decls *[]ast.Decl
+
+ switch x := parent.(type) {
+ case *ast.File:
+ decls = &x.Decls
+ case *ast.StructLit:
+ decls = &x.Elts
+ default:
+ panic(fmt.Sprintf("impossible scope type %T", parent))
+ }
+
+ i := 0
+ for ; i < len(*decls); i++ {
+ if (*decls)[i] == link {
+ break
+ }
+ if f, ok := (*decls)[i].(*ast.Field); ok && f.Label == link {
+ break
+ }
+ }
+
+ if i > 0 {
+ ast.SetRelPos(let, token.NewSection)
+ }
+
+ a := append((*decls)[:i:i], let)
+ *decls = append(a, (*decls)[i:]...)
+ }
+ return name
+}
+
+func (z *sanitizer) markUsed(s *scope, n *ast.Ident) bool {
+ if n.Node != nil {
+ return false
+ }
+ _, _, entry := s.lookup(n.String())
+ z.referenced[entry.link] = true
+ return true
+}
+
+func (z *sanitizer) cleanImports() {
+ z.file.VisitImports(func(d *ast.ImportDecl) {
+ k := 0
+ for _, s := range d.Specs {
+ if _, ok := z.referenced[s]; ok {
+ d.Specs[k] = s
+ k++
+ }
+ }
+ d.Specs = d.Specs[:k]
+ })
+}
+
+func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool {
+ if n.Node == nil {
+ return true
+ }
+
+ _, _, node := s.lookup(n.Name)
+ if node.node == nil {
+ spec, ok := n.Node.(*ast.ImportSpec)
+ if !ok {
+ // Clear node. A reference may have been moved to a different
+ // file. If not, it should be an error.
+ n.Node = nil
+ n.Scope = nil
+ return false
+ }
+
+ _ = z.addImport(spec)
+ info, _ := ParseImportSpec(spec)
+ z.fileScope.insert(info.Ident, spec, spec)
+ return true
+ }
+
+ if x, ok := n.Node.(*ast.ImportSpec); ok {
+ xi, _ := ParseImportSpec(x)
+
+ if y, ok := node.node.(*ast.ImportSpec); ok {
+ yi, _ := ParseImportSpec(y)
+ if xi.ID == yi.ID { // name must be identical as a result of lookup.
+ z.referenced[y] = true
+ n.Node = x
+ n.Scope = nil
+ return false
+ }
+ }
+
+ // Either:
+ // - the import is shadowed
+ // - an incorrect import is matched
+ // In all cases we need to create a new import with a unique name or
+ // use a previously created one.
+ spec := z.importMap[xi.ID]
+ if spec == nil {
+ name := z.uniqueName(xi.Ident, false)
+ spec = z.addImport(&ast.ImportSpec{
+ Name: ast.NewIdent(name),
+ Path: x.Path,
+ })
+ z.importMap[xi.ID] = spec
+ z.fileScope.insert(name, spec, spec)
+ }
+
+ info, _ := ParseImportSpec(spec)
+ // TODO(apply): replace n itself directly
+ n.Name = info.Ident
+ n.Node = spec
+ n.Scope = nil
+ return false
+ }
+
+ if node.node == n.Node {
+ return true
+ }
+
+ // n.Node != node and are both not nil and n.Node is not an ImportSpec.
+ // This means that either n.Node is illegal or shadowed.
+ // Look for the scope in which n.Node is defined and add an alias or let.
+
+ parent, e, ok := s.resolveScope(n.Name, n.Node)
+ if !ok {
+ // The node isn't within a legal scope within this file. It may only
+ // possibly shadow a value of another file. We add a top-level let
+ // clause to refer to this value.
+
+ // TODO(apply): better would be to have resolve use Apply so that we can replace
+ // the entire ast.Ident, rather than modifying it.
+ // TODO: resolve to new node or rely on another pass of Resolve?
+ n.Name = z.unshadow(z.file, n.Name, n)
+ n.Node = nil
+ n.Scope = nil
+
+ return false
+ }
+
+ var name string
+ // var isNew bool
+ switch x := e.link.(type) {
+ case *ast.Field: // referring to regular field.
+ name, ok = z.altMap[x]
+ if ok {
+ break
+ }
+ // If this field has not alias, introduce one with a unique name.
+ // If this has an alias, also introduce a new name. There is a
+ // possibility that the alias can be used, but it is easier to just
+ // assign a new name, assuming this case is rather rare.
+ switch y := x.Label.(type) {
+ case *ast.Alias:
+ name = z.unshadow(parent, y.Ident.Name, y)
+
+ case *ast.Ident:
+ var isNew bool
+ name, isNew = z.addRename(y.Name, x)
+ if isNew {
+ ident := ast.NewIdent(name)
+ // Move formatting and comments from original label to alias
+ // identifier.
+ CopyMeta(ident, y)
+ ast.SetRelPos(y, token.NoRelPos)
+ ast.SetComments(y, nil)
+ x.Label = &ast.Alias{Ident: ident, Expr: y}
+ }
+
+ default:
+ // This is an illegal reference.
+ return false
+ }
+
+ case *ast.LetClause:
+ name = z.unshadow(parent, x.Ident.Name, x)
+
+ case *ast.Alias:
+ name = z.unshadow(parent, x.Ident.Name, x)
+
+ default:
+ panic(fmt.Sprintf("unexpected link type %T", e.link))
+ }
+
+ // TODO(apply): better would be to have resolve use Apply so that we can replace
+ // the entire ast.Ident, rather than modifying it.
+ n.Name = name
+ n.Node = nil
+ n.Scope = nil
+
+ return true
+}
+
+// uniqueName returns a new name globally unique name of the form
+// base_XX ... base_XXXXXXXXXXXXXX or _base or the same pattern with a '_'
+// prefix if hidden is true.
+//
+// It prefers short extensions over large ones, while ensuring the likelihood of
+// fast termination is high. There are at least two digits to make it visually
+// clearer this concerns a generated number.
+//
+func (z *sanitizer) uniqueName(base string, hidden bool) string {
+ if hidden && !strings.HasPrefix(base, "_") {
+ base = "_" + base
+ if !z.names[base] {
+ z.names[base] = true
+ return base
+ }
+ }
+
+ // TODO(go1.13): const mask = 0xff_ffff_ffff_ffff
+ const mask = 0xffffffffffffff // max bits; stay clear of int64 overflow
+ const shift = 4 // rate of growth
+ for n := int64(0x10); ; n = int64(mask & ((n << shift) | 1)) {
+ num := z.rand.Intn(int(n))
+ name := fmt.Sprintf("%s_%02X", base, num)
+ if !z.names[name] {
+ z.names[name] = true
+ return name
+ }
+ }
+}
+
+func (z *sanitizer) addImport(spec *ast.ImportSpec) *ast.ImportSpec {
+ spec = insertImport(&z.file.Decls, spec)
+ z.referenced[spec] = true
+ return spec
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/util.go b/vendor/cuelang.org/go/cue/ast/astutil/util.go
new file mode 100644
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/util.go
@@ -0,0 +1,157 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astutil
+
+import (
+ "path"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+// ImportPathName derives the package name from the given import path.
+//
+// Examples:
+//      string           string
+//      foo.com/bar      bar
+//      foo.com/bar:baz  baz
+func ImportPathName(id string) string {
+ name := path.Base(id)
+ if p := strings.LastIndexByte(name, ':'); p > 0 {
+ name = name[p+1:]
+ }
+ return name
+}
+
+// ImportInfo describes the information contained in an ImportSpec.
+type ImportInfo struct {
+ Ident string // identifier used to refer to the import
+ PkgName string // name of the package
+ ID string // full import path, including the name
+ Dir string // import path, excluding the name
+}
+
+// ParseImportSpec returns the name and full path of an ImportSpec.
+func ParseImportSpec(spec *ast.ImportSpec) (info ImportInfo, err error) {
+ str, err := strconv.Unquote(spec.Path.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.ID = str
+
+ if p := strings.LastIndexByte(str, ':'); p > 0 {
+ info.Dir = str[:p]
+ info.PkgName = str[p+1:]
+ } else {
+ info.Dir = str
+ info.PkgName = path.Base(str)
+ }
+
+ if spec.Name != nil {
+ info.Ident = spec.Name.Name
+ } else {
+ info.Ident = info.PkgName
+ }
+
+ return info, nil
+}
+
+// CopyComments associates comments of one node with another.
+// It may change the relative position of comments.
+func CopyComments(to, from ast.Node) {
+ if from == nil {
+ return
+ }
+ ast.SetComments(to, from.Comments())
+}
+
+// CopyPosition sets the position of one node to another.
+func CopyPosition(to, from ast.Node) {
+ if from == nil {
+ return
+ }
+ ast.SetPos(to, from.Pos())
+}
+
+// CopyMeta copies comments and position information from one node to another.
+// It returns the destination node.
+func CopyMeta(to, from ast.Node) ast.Node {
+ if from == nil {
+ return to
+ }
+ ast.SetComments(to, from.Comments())
+ ast.SetPos(to, from.Pos())
+ return to
+}
+
+// insertImport looks up an existing import with the given name and path or will
+// add spec if it doesn't exist. It returns a spec in decls matching spec.
+func insertImport(decls *[]ast.Decl, spec *ast.ImportSpec) *ast.ImportSpec {
+ x, _ := ParseImportSpec(spec)
+
+ a := *decls
+
+ var imports *ast.ImportDecl
+ var orig *ast.ImportSpec
+
+ p := 0
+outer:
+ for i := 0; i < len(a); i++ {
+ d := a[i]
+ switch t := d.(type) {
+ default:
+ break outer
+
+ case *ast.Package:
+ p = i + 1
+ case *ast.CommentGroup:
+ p = i + 1
+ case *ast.Attribute:
+ continue
+ case *ast.ImportDecl:
+ p = i + 1
+ imports = t
+ for _, s := range t.Specs {
+ y, _ := ParseImportSpec(s)
+ if y.ID != x.ID {
+ continue
+ }
+ orig = s
+ if x.Ident == "" || y.Ident == x.Ident {
+ return s
+ }
+ }
+ }
+ }
+
+ // Import not found, add one.
+ if imports == nil {
+ imports = &ast.ImportDecl{}
+ preamble := append(a[:p:p], imports)
+ a = append(preamble, a[p:]...)
+ *decls = a
+ }
+
+ if orig != nil {
+ CopyComments(spec, orig)
+ }
+ imports.Specs = append(imports.Specs, spec)
+ ast.SetRelPos(imports.Specs[0], token.NoRelPos)
+
+ return spec
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/walk.go b/vendor/cuelang.org/go/cue/ast/astutil/walk.go
new file mode 100644
index 0000000000..2de73d6e91
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/walk.go
@@ -0,0 +1,196 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astutil
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/ast"
+)
+
+// TODO: use ast.Walk or adopt that version to allow visitors.
+
+// A visitor's before method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.After.
+type visitor interface {
+ Before(node ast.Node) (w visitor)
+ After(node ast.Node)
+}
+
+// Helper functions for common node lists. They may be empty.
+
+func walkExprList(v visitor, list []ast.Expr) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
+func walkDeclList(v visitor, list []ast.Decl) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
+// walk traverses an AST in depth-first order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
+func walk(v visitor, node ast.Node) {
+ if v = v.Before(node); v == nil {
+ return
+ }
+
+ // TODO: record the comment groups and interleave with the values like for
+ // parsing and printing?
+ for _, c := range node.Comments() {
+ walk(v, c)
+ }
+
+ // walk children
+ // (the order of the cases matches the order
+ // of the corresponding node types in go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ for _, c := range n.List {
+ walk(v, c)
+ }
+
+ case *ast.Attribute:
+ // nothing to do
+
+ case *ast.Field:
+ walk(v, n.Label)
+ if n.Value != nil {
+ walk(v, n.Value)
+ }
+ for _, a := range n.Attrs {
+ walk(v, a)
+ }
+
+ case *ast.StructLit:
+ for _, f := range n.Elts {
+ walk(v, f)
+ }
+
+ // Expressions
+ case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Interpolation:
+ for _, e := range n.Elts {
+ walk(v, e)
+ }
+
+ case *ast.ListLit:
+ walkExprList(v, n.Elts)
+
+ case *ast.Ellipsis:
+ if n.Type != nil {
+ walk(v, n.Type)
+ }
+
+ case *ast.ParenExpr:
+ walk(v, n.X)
+
+ case *ast.SelectorExpr:
+ walk(v, n.X)
+ walk(v, n.Sel)
+
+ case *ast.IndexExpr:
+ walk(v, n.X)
+ walk(v, n.Index)
+
+ case *ast.SliceExpr:
+ walk(v, n.X)
+ if n.Low != nil {
+ walk(v, n.Low)
+ }
+ if n.High != nil {
+ walk(v, n.High)
+ }
+
+ case *ast.CallExpr:
+ walk(v, n.Fun)
+ walkExprList(v, n.Args)
+
+ case *ast.UnaryExpr:
+ walk(v, n.X)
+
+ case *ast.BinaryExpr:
+ walk(v, n.X)
+ walk(v, n.Y)
+
+ // Declarations
+ case *ast.ImportSpec:
+ if n.Name != nil {
+ walk(v, n.Name)
+ }
+ walk(v, n.Path)
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.ImportDecl:
+ for _, s := range n.Specs {
+ walk(v, s)
+ }
+
+ case *ast.EmbedDecl:
+ walk(v, n.Expr)
+
+ case *ast.Alias:
+ walk(v, n.Ident)
+ walk(v, n.Expr)
+
+ case *ast.Comprehension:
+ for _, c := range n.Clauses {
+ walk(v, c)
+ }
+ walk(v, n.Value)
+
+ // Files and packages
+ case *ast.File:
+ walkDeclList(v, n.Decls)
+
+ case *ast.Package:
+ // The package identifier isn't really an identifier. Skip it.
+
+ case *ast.LetClause:
+ walk(v, n.Ident)
+ walk(v, n.Expr)
+
+ case *ast.ForClause:
+ if n.Key != nil {
+ walk(v, n.Key)
+ }
+ walk(v, n.Value)
+ walk(v, n.Source)
+
+ case *ast.IfClause:
+ walk(v, n.Condition)
+
+ default:
+ panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+ }
+
+ v.After(node)
+}
diff --git a/vendor/cuelang.org/go/cue/ast/comments.go b/vendor/cuelang.org/go/cue/ast/comments.go
new file mode 100644
index 0000000000..09d5402c88
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/comments.go
@@ -0,0 +1,46 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+// Comments returns all comments associated with a given node.
+func Comments(n Node) []*CommentGroup {
+ c := n.commentInfo()
+ if c == nil {
+ return nil
+ }
+ return c.Comments()
+}
+
+// AddComment adds the given comment to the node if it supports it.
+// If a node does not support comments, such as for CommentGroup or Comment,
+// this call has no effect.
+func AddComment(n Node, cg *CommentGroup) {
+ c := n.commentInfo()
+ if c == nil {
+ return
+ }
+ c.AddComment(cg)
+}
+
+// SetComments replaces all comments of n with the given set of comments.
+// If a node does not support comments, such as for CommentGroup or Comment,
+// this call has no effect.
+func SetComments(n Node, cgs []*CommentGroup) {
+ c := n.commentInfo()
+ if c == nil {
+ return
+ }
+ c.SetComments(cgs)
+}
diff --git a/vendor/cuelang.org/go/cue/ast/ident.go b/vendor/cuelang.org/go/cue/ast/ident.go
new file mode 100644
index 0000000000..1f400b2869
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/ident.go
@@ -0,0 +1,195 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
// isLetter reports whether ch may appear in an identifier as a letter:
// an ASCII letter, or any non-ASCII rune that the unicode package
// classifies as a letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	default:
		return ch >= utf8.RuneSelf && unicode.IsLetter(ch)
	}
}
+
// isDigit reports whether ch is an ASCII digit or any non-ASCII rune that
// the unicode package classifies as a digit.
func isDigit(ch rune) bool {
	// TODO(mpvl): Is this correct?
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= utf8.RuneSelf && unicode.IsDigit(ch)
}
+
+// IsValidIdent reports whether str is a valid identifier.
+func IsValidIdent(ident string) bool {
+ if ident == "" {
+ return false
+ }
+
+ // TODO: use consumed again to allow #0.
+ // consumed := false
+ if strings.HasPrefix(ident, "_") {
+ ident = ident[1:]
+ // consumed = true
+ if len(ident) == 0 {
+ return true
+ }
+ }
+ if strings.HasPrefix(ident, "#") {
+ ident = ident[1:]
+ // consumed = true
+ }
+
+ // if !consumed {
+ if r, _ := utf8.DecodeRuneInString(ident); isDigit(r) {
+ return false
+ }
+ // }
+
+ for _, r := range ident {
+ if isLetter(r) || isDigit(r) || r == '_' || r == '$' {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
// ParseIdent unquotes a possibly backquote-quoted identifier and reports an
// error if the result is not a valid identifier.
//
// Deprecated: quoted identifiers are deprecated. Use aliases.
func ParseIdent(n *Ident) (string, error) {
	return parseIdent(n.NamePos, n.Name)
}
+
// parseIdent validates ident and returns it, unquoting it first if it is a
// backquote-quoted identifier. pos is used only for error positions.
func parseIdent(pos token.Pos, ident string) (string, error) {
	if ident == "" {
		return "", errors.Newf(pos, "empty identifier")
	}
	quoted := false
	// Backquote-quoted identifiers are unquoted before validation; quoting
	// additionally permits '-' in the name (see the loop below).
	if ident[0] == '`' {
		u, err := strconv.Unquote(ident)
		if err != nil {
			return "", errors.Newf(pos, "invalid quoted identifier")
		}
		ident = u
		quoted = true
	}

	// p counts the leading marker bytes ("_" and/or "#") that precede the
	// identifier proper.
	p := 0
	if strings.HasPrefix(ident, "_") {
		p++
		// A bare "_" is a valid identifier.
		if len(ident) == 1 {
			return ident, nil
		}
	}
	if strings.HasPrefix(ident[p:], "#") {
		p++
		// if len(ident) == p {
		// 	return "", errors.Newf(pos, "invalid identifier '_#'")
		// }
	}

	// Reject a leading digit, except directly after a lone "_" marker
	// (i.e. only when there are no markers or the last marker is '#').
	if p == 0 || ident[p-1] == '#' {
		if r, _ := utf8.DecodeRuneInString(ident[p:]); isDigit(r) {
			return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r))
		}
	}

	for _, r := range ident[p:] {
		if isLetter(r) || isDigit(r) || r == '_' || r == '$' {
			continue
		}
		// '-' is only allowed inside quoted identifiers.
		if r == '-' && quoted {
			continue
		}
		return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r))
	}

	return ident, nil
}
+
// LabelName reports the name of a label, whether it is an identifier
// (it binds a value to a scope), and whether it is valid.
// Keywords that are allowed in label positions are interpreted accordingly.
//
// Examples:
//
//	Label   Result
//	foo     "foo"  true   nil
//	true    "true" true   nil
//	"foo"   "foo"  false  nil
//	"x-y"   "x-y"  false  nil
//	"foo    ""     false  invalid string
//	"\(x)"  ""     false  errors.Is(err, ErrIsExpression)
//	X=foo   "foo"  true   nil
func LabelName(l Label) (name string, isIdent bool, err error) {
	// An alias is transparent: report on the aliased label.
	if a, ok := l.(*Alias); ok {
		l, _ = a.Expr.(Label)
	}
	switch n := l.(type) {
	case *ListLit:
		// An expression, but not one that can be evaluated.
		return "", false, errors.Newf(l.Pos(),
			"cannot reference fields with square brackets labels outside the field value")

	case *Ident:
		// TODO(legacy): use name = n.Name
		name, err = ParseIdent(n)
		if err != nil {
			return "", false, err
		}
		isIdent = true
		// TODO(legacy): remove this return once quoted identifiers are removed.
		return name, isIdent, err

	case *BasicLit:
		switch n.Kind {
		case token.STRING:
			// Use strconv to only allow double-quoted, single-line strings.
			name, err = strconv.Unquote(n.Value)
			if err != nil {
				err = errors.Newf(l.Pos(), "invalid")
			}

		case token.NULL, token.TRUE, token.FALSE:
			// Keywords used as labels keep their literal spelling as the name.
			name = n.Value
			isIdent = true

		default:
			// TODO: allow numbers to be fields
			// This includes interpolation and template labels.
			return "", false, errors.Wrapf(ErrIsExpression, l.Pos(),
				"cannot use numbers as fields")
		}

	default:
		// This includes interpolation and template labels.
		return "", false, errors.Wrapf(ErrIsExpression, l.Pos(),
			"label is an expression")
	}
	// A quoted string may spell a valid identifier ("foo"), but a name that
	// fails validation is never reported as an identifier.
	if !IsValidIdent(name) {
		isIdent = false
	}
	return name, isIdent, err

}
+
// ErrIsExpression indicates that a label is an expression and therefore has
// no concrete name. This error is never returned directly; test for it with
// errors.Is.
var ErrIsExpression = errors.New("not a concrete label")
diff --git a/vendor/cuelang.org/go/cue/ast/walk.go b/vendor/cuelang.org/go/cue/ast/walk.go
new file mode 100644
index 0000000000..a23fce454d
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/walk.go
@@ -0,0 +1,265 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/token"
+)
+
+// Walk traverses an AST in depth-first order: It starts by calling f(node);
+// node must not be nil. If before returns true, Walk invokes f recursively for
+// each of the non-nil children of node, followed by a call of after. Both
+// functions may be nil. If before is nil, it is assumed to always return true.
+//
+func Walk(node Node, before func(Node) bool, after func(Node)) {
+ walk(&inspector{before: before, after: after}, node)
+}
+
// A visitor's Before method is invoked for each node encountered by walk.
// If the returned visitor w is not nil, walk visits each of the children
// of node with the visitor w, followed by a call of w.After.
type visitor interface {
	Before(node Node) (w visitor)
	After(node Node)
}
+
+// Helper functions for common node lists. They may be empty.
+
+func walkExprList(v visitor, list []Expr) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
+func walkDeclList(v visitor, list []Decl) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
// walk traverses an AST in depth-first order: It starts by calling
// v.Before(node); node must not be nil. If the visitor returned by
// v.Before(node) is not nil, walk is invoked recursively with that visitor
// for each of the non-nil children of node, followed by a call of After.
func walk(v visitor, node Node) {
	if v = v.Before(node); v == nil {
		return
	}

	// TODO: record the comment groups and interleave with the values like for
	// parsing and printing?
	for _, c := range Comments(node) {
		walk(v, c)
	}

	// walk children
	// (the order of the cases matches the order
	// of the corresponding node types in go)
	switch n := node.(type) {
	// Comments and fields
	case *Comment:
		// nothing to do

	case *CommentGroup:
		for _, c := range n.List {
			walk(v, c)
		}

	case *Attribute:
		// nothing to do

	case *Field:
		walk(v, n.Label)
		if n.Value != nil {
			walk(v, n.Value)
		}
		for _, a := range n.Attrs {
			walk(v, a)
		}

	case *StructLit:
		walkDeclList(v, n.Elts)

	// Expressions
	case *BottomLit, *BadExpr, *Ident, *BasicLit:
		// nothing to do

	case *Interpolation:
		for _, e := range n.Elts {
			walk(v, e)
		}

	case *ListLit:
		walkExprList(v, n.Elts)

	case *Ellipsis:
		if n.Type != nil {
			walk(v, n.Type)
		}

	case *ParenExpr:
		walk(v, n.X)

	case *SelectorExpr:
		walk(v, n.X)
		walk(v, n.Sel)

	case *IndexExpr:
		walk(v, n.X)
		walk(v, n.Index)

	case *SliceExpr:
		walk(v, n.X)
		if n.Low != nil {
			walk(v, n.Low)
		}
		if n.High != nil {
			walk(v, n.High)
		}

	case *CallExpr:
		walk(v, n.Fun)
		walkExprList(v, n.Args)

	case *UnaryExpr:
		walk(v, n.X)

	case *BinaryExpr:
		walk(v, n.X)
		walk(v, n.Y)

	// Declarations
	case *ImportSpec:
		if n.Name != nil {
			walk(v, n.Name)
		}
		walk(v, n.Path)

	case *BadDecl:
		// nothing to do

	case *ImportDecl:
		for _, s := range n.Specs {
			walk(v, s)
		}

	case *EmbedDecl:
		walk(v, n.Expr)

	case *LetClause:
		walk(v, n.Ident)
		walk(v, n.Expr)

	case *Alias:
		walk(v, n.Ident)
		walk(v, n.Expr)

	case *Comprehension:
		for _, c := range n.Clauses {
			walk(v, c)
		}
		walk(v, n.Value)

	// Files and packages
	case *File:
		walkDeclList(v, n.Decls)

	case *Package:
		walk(v, n.Name)

	case *ForClause:
		if n.Key != nil {
			walk(v, n.Key)
		}
		walk(v, n.Value)
		walk(v, n.Source)

	case *IfClause:
		walk(v, n.Condition)

	default:
		// Any node type not listed above is a programming error.
		panic(fmt.Sprintf("Walk: unexpected node type %T", n))
	}

	v.After(node)
}
+
// inspector adapts a pair of callback functions to the visitor interface,
// maintaining a stack of comment frames so a node's comment groups can be
// emitted while that node is being visited.
type inspector struct {
	before func(Node) bool
	after  func(Node)

	commentStack []commentFrame
	current      commentFrame
}

// commentFrame holds the not-yet-emitted comment groups of the node
// currently being visited, and pos, the current position within that node
// that the groups' Position field is compared against.
type commentFrame struct {
	cg  []*CommentGroup
	pos int8
}
+
// Before implements visitor. If the user's before callback permits descent
// (or is nil), it pushes the current comment frame, starts a fresh frame
// with node's comments, emits the comments at the initial position, and
// returns f to continue the walk. Otherwise it returns nil to prune node.
func (f *inspector) Before(node Node) visitor {
	if f.before == nil || f.before(node) {
		f.commentStack = append(f.commentStack, f.current)
		f.current = commentFrame{cg: Comments(node)}
		f.visitComments(f.current.pos)
		return f
	}
	return nil
}
+
// After implements visitor. It flushes any comments remaining in the current
// frame (127, the maximum int8, is presumably a sentinel that no comment
// position equals — confirm), pops the parent frame, advances the parent's
// position, and finally invokes the user's after callback.
func (f *inspector) After(node Node) {
	f.visitComments(127)
	p := len(f.commentStack) - 1
	f.current = f.commentStack[p]
	f.commentStack = f.commentStack[:p]
	f.current.pos++
	if f.after != nil {
		f.after(node)
	}
}
+
// Token advances the position within the current comment frame.
// NOTE(review): no caller is visible in this file — presumably invoked by
// traversals that track tokens; confirm before relying on it.
func (f *inspector) Token(t token.Token) {
	f.current.pos++
}

// setPos sets the position within the current comment frame.
func (f *inspector) setPos(i int8) {
	f.current.pos = i
}
+
// visitComments emits the comment groups buffered in the current frame via
// the before/after callbacks, consuming each group from the queue.
//
// NOTE(review): a group whose Position equals pos is skipped but still
// consumed by the loop's post statement, so it is never visited — confirm
// against upstream that this is intentional.
func (f *inspector) visitComments(pos int8) {
	c := &f.current
	for ; len(c.cg) > 0; c.cg = c.cg[1:] {
		cg := c.cg[0]
		if cg.Position == pos {
			continue
		}
		// Visit the group, then each individual comment within it.
		if f.before == nil || f.before(cg) {
			for _, c := range cg.List {
				if f.before == nil || f.before(c) {
					if f.after != nil {
						f.after(c)
					}
				}
			}
			if f.after != nil {
				f.after(cg)
			}
		}
	}
}
diff --git a/vendor/cuelang.org/go/cue/attribute.go b/vendor/cuelang.org/go/cue/attribute.go
new file mode 100644
index 0000000000..dd874d6b39
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/attribute.go
@@ -0,0 +1,200 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/export"
+)
+
// Attribute returns the attribute data for the given key.
// The returned attribute will return an error for any of its methods if there
// is no attribute for the requested key.
func (v Value) Attribute(key string) Attribute {
	if v.v == nil {
		return nonExistAttr(key)
	}
	// Scan the field attributes for one whose name matches key.
	for _, a := range export.ExtractFieldAttrs(v.v) {
		k, _ := a.Split()
		if key != k {
			continue
		}
		return newAttr(internal.FieldAttr, a)
	}

	return nonExistAttr(key)
}
+
// newAttr converts the AST attribute a, found at kind-k position, into an
// exported Attribute with its body parsed.
func newAttr(k internal.AttrKind, a *ast.Attribute) Attribute {
	key, body := a.Split()
	x := internal.ParseAttrBody(token.NoPos, body)
	x.Name = key
	x.Kind = k
	return Attribute{x}
}

// nonExistAttr returns an Attribute stand-in for a key that has no
// attribute; per Value.Attribute's contract its methods report errors.
func nonExistAttr(key string) Attribute {
	a := internal.NewNonExisting(key)
	a.Name = key
	a.Kind = internal.FieldAttr
	return Attribute{a}
}
+
// Attributes reports all field attributes for the Value.
//
// To retrieve attributes of multiple kinds, you can bitwise-or kinds together.
// Use ValueAttr to query all attributes associated with a value.
func (v Value) Attributes(mask AttrKind) []Attribute {
	if v.v == nil {
		return nil
	}

	attrs := []Attribute{}

	if mask&FieldAttr != 0 {
		for _, a := range export.ExtractFieldAttrs(v.v) {
			attrs = append(attrs, newAttr(internal.FieldAttr, a))
		}
	}

	if mask&DeclAttr != 0 {
		for _, a := range export.ExtractDeclAttrs(v.v) {
			attrs = append(attrs, newAttr(internal.DeclAttr, a))
		}
	}

	return attrs
}
+
// AttrKind indicates the location of an attribute within CUE source.
type AttrKind int

// The values below mirror the corresponding constants in the internal
// package, so direct conversions between the two are valid.
const (
	// FieldAttr indicates a field attribute.
	// foo: bar @attr()
	FieldAttr AttrKind = AttrKind(internal.FieldAttr)

	// DeclAttr indicates a declaration attribute.
	// foo: {
	//     @attr()
	// }
	DeclAttr AttrKind = AttrKind(internal.DeclAttr)

	// A ValueAttr is a bit mask to request any attribute that is locally
	// associated with a field, instead of, for instance, an entire file.
	ValueAttr AttrKind = FieldAttr | DeclAttr

	// TODO: Possible future attr kinds
	// ElemAttr (is a ValueAttr)
	// FileAttr (not a ValueAttr)

	// TODO: Merge: merge namesake attributes.
)
+
// An Attribute contains meta data about a field.
type Attribute struct {
	attr internal.Attr
}

// Format implements fmt.Formatter.
// The verb is ignored; an attribute always prints as @name(body).
func (a Attribute) Format(w fmt.State, verb rune) {
	fmt.Fprintf(w, "@%s(%s)", a.attr.Name, a.attr.Body)
}

var _ fmt.Formatter = &Attribute{}
+
// Name returns the name of the attribute, for instance, "json" for @json(...).
func (a *Attribute) Name() string {
	return a.attr.Name
}

// Contents reports the full contents of an attribute within parentheses, so
// contents in @attr(contents).
func (a *Attribute) Contents() string {
	return a.attr.Body
}

// NumArgs reports the number of arguments parsed for this attribute.
func (a *Attribute) NumArgs() int {
	return len(a.attr.Fields)
}

// Arg reports the contents of the ith comma-separated argument of a.
//
// If the argument contains an unescaped equals sign, it returns a key-value
// pair. Otherwise it returns the contents in value.
//
// Arg panics if i is not in [0, NumArgs).
func (a *Attribute) Arg(i int) (key, value string) {
	f := a.attr.Fields[i]
	return f.Key(), f.Value()
}

// RawArg reports the raw contents of the ith comma-separated argument of a,
// including surrounding spaces. It panics if i is not in [0, NumArgs).
func (a *Attribute) RawArg(i int) string {
	return a.attr.Fields[i].Text()
}

// Kind reports the type of location within CUE source where the attribute
// was specified.
func (a *Attribute) Kind() AttrKind {
	return AttrKind(a.attr.Kind)
}
+
// Err returns the error associated with this Attribute or nil if this
// attribute is valid.
func (a *Attribute) Err() error {
	return a.attr.Err
}

// String reports the possibly empty string value at the given position or
// an error if the attribute is invalid or if the position does not exist.
func (a *Attribute) String(pos int) (string, error) {
	return a.attr.String(pos)
}

// Int reports the integer at the given position or an error if the attribute is
// invalid, the position does not exist, or the value at the given position is
// not an integer.
func (a *Attribute) Int(pos int) (int64, error) {
	return a.attr.Int(pos)
}

// Flag reports whether an entry with the given name exists at position pos or
// onwards or an error if the attribute is invalid or if the first pos-1 entries
// are not defined.
func (a *Attribute) Flag(pos int, key string) (bool, error) {
	return a.attr.Flag(pos, key)
}
+
// Lookup searches for an entry of the form key=value from position pos onwards
// and reports the value if found. It reports an error if the attribute is
// invalid or if the first pos-1 entries are not defined.
func (a *Attribute) Lookup(pos int, key string) (val string, found bool, err error) {
	val, found, err = a.attr.Lookup(pos, key)

	// TODO: remove at some point. This is an ugly hack to simulate the old
	// behavior of protobufs: for @protobuf attributes, a missing "type" key
	// falls back to the positional string at index 1.
	if !found && a.attr.Name == "protobuf" && key == "type" {
		val, err = a.String(1)
		found = err == nil
	}
	return val, found, err
}
diff --git a/vendor/cuelang.org/go/cue/build.go b/vendor/cuelang.org/go/cue/build.go
new file mode 100644
index 0000000000..82863ee014
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/runtime"
+)
+
// A Runtime is used for creating CUE Values.
//
// Any operation that involves two Values or Instances should originate from
// the same Runtime.
//
// The zero value of Runtime works for legacy reasons, but
// should not be used. It may panic at some point.
//
// Deprecated: use Context.
type Runtime runtime.Runtime

// runtime converts r to the underlying *runtime.Runtime, initializing it
// before returning it.
func (r *Runtime) runtime() *runtime.Runtime {
	rt := (*runtime.Runtime)(r)
	rt.Init()
	return rt
}

// hiddenRuntime aliases Runtime; deprecated methods are declared on it
// (presumably to keep them out of Runtime's godoc listing — confirm).
type hiddenRuntime = Runtime
+
// complete wraps the built vertex v and its build instance p in an Instance.
// If the resulting instance carries an error, (nil, err) is returned instead.
func (r *Runtime) complete(p *build.Instance, v *adt.Vertex) (*Instance, error) {
	idx := r.runtime()
	inst := getImportFromBuild(idx, p, v)
	inst.ImportPath = p.ImportPath
	if inst.Err != nil {
		return nil, inst.Err
	}
	return inst, nil
}
+
// Compile compiles the given source into an Instance. The source code may be
// provided as a string, byte slice, io.Reader. The name is used as the file
// name in position information. The source may import builtin packages. Use
// Build to allow importing non-builtin packages.
//
// Deprecated: use Parse or ParseBytes. The use of Instance is being phased out.
func (r *hiddenRuntime) Compile(filename string, source interface{}) (*Instance, error) {
	cfg := &runtime.Config{Filename: filename}
	v, p := r.runtime().Compile(cfg, source)
	return r.complete(p, v)
}
+
// CompileFile compiles the given source file into an Instance. The source may
// import builtin packages. Use Build to allow importing non-builtin packages.
//
// Deprecated: use BuildFile. The use of Instance is being phased out.
func (r *hiddenRuntime) CompileFile(file *ast.File) (*Instance, error) {
	v, p := r.runtime().CompileFile(nil, file)
	return r.complete(p, v)
}
+
// CompileExpr compiles the given source expression into an Instance. The source
// may import builtin packages. Use Build to allow importing non-builtin
// packages.
//
// Deprecated: use BuildExpr. The use of Instance is being phased out.
func (r *hiddenRuntime) CompileExpr(expr ast.Expr) (*Instance, error) {
	f, err := astutil.ToFile(expr)
	if err != nil {
		return nil, err
	}
	v := (*Context)(r).BuildExpr(expr)
	err = v.Err()
	// Unlike the other compile helpers, an Instance is constructed and
	// returned even when err is non-nil; Incomplete records that state.
	inst := &Instance{
		index: r.runtime(),
		root:  v.v,
		inst: &build.Instance{
			Files: []*ast.File{f},
		},
		Err:        errors.Promote(err, ""),
		Incomplete: err != nil,
	}
	return inst, err
}
+
// Parse parses a CUE source value into a CUE Instance. The source code may be
// provided as a string, byte slice, or io.Reader. The name is used as the file
// name in position information. The source may import builtin packages.
//
// It is a thin wrapper around Compile.
//
// Deprecated: use CompileString or CompileBytes. The use of Instance is being
// phased out.
func (r *hiddenRuntime) Parse(name string, source interface{}) (*Instance, error) {
	return r.Compile(name, source)
}
+
// Build creates an Instance from the given build.Instance. A returned Instance
// may be incomplete, in which case its Err field is set.
//
// Deprecated: use Context.BuildInstance. The use of Instance is being phased
// out.
func (r *hiddenRuntime) Build(p *build.Instance) (*Instance, error) {
	// The build error, if any, is surfaced through complete via inst.Err.
	v, _ := r.runtime().Build(nil, p)
	return r.complete(p, v)
}
+
// Build builds the given instances with a fresh Runtime, returning one
// (possibly errored) *Instance per input. It panics if instances is empty.
//
// Deprecated: use cuecontext.Context.BuildInstances. The use of Instance is
// being phased out.
func Build(instances []*build.Instance) []*Instance {
	if len(instances) == 0 {
		panic("cue: list of instances must not be empty")
	}
	var r Runtime
	// Per-instance errors are recorded on each Instance's Err field.
	a, _ := r.build(instances)
	return a
}
+
// build builds each of the given instances, returning one Instance per input
// and the accumulated errors of all of them.
func (r *hiddenRuntime) build(instances []*build.Instance) ([]*Instance, error) {
	index := r.runtime()

	loaded := []*Instance{}

	var errs errors.Error

	for _, p := range instances {
		v, _ := index.Build(nil, p)
		i := getImportFromBuild(index, p, v)
		errs = errors.Append(errs, i.Err)
		loaded = append(loaded, i)
	}

	// TODO: insert imports
	return loaded, errs
}
+
// FromExpr creates an instance from an expression.
// Any references must be resolved beforehand.
//
// It wraps expr in a single-declaration file via an EmbedDecl.
//
// Deprecated: use CompileExpr
func (r *hiddenRuntime) FromExpr(expr ast.Expr) (*Instance, error) {
	return r.CompileFile(&ast.File{
		Decls: []ast.Decl{&ast.EmbedDecl{Expr: expr}},
	})
}
diff --git a/vendor/cuelang.org/go/cue/build/context.go b/vendor/cuelang.org/go/cue/build/context.go
new file mode 100644
index 0000000000..664326eeef
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/context.go
@@ -0,0 +1,128 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package build defines data types and utilities for defining CUE configuration
+// instances.
+//
+// This package enforces the rules regarding packages and instances as defined
+// in the spec, but it leaves any other details, as well as handling of modules,
+// up to the implementation.
+//
+// A full implementation of instance loading can be found in the loader package.
+//
+// WARNING: this packages may change. It is fine to use load and cue, who both
+// use this package.
+package build
+
+import (
+ "context"
+
+ "cuelang.org/go/cue/ast"
+)
+
// A Context keeps track of state of building instances and caches work.
type Context struct {
	ctxt context.Context

	// loader is the default LoadFunc used by NewInstance when no explicit
	// load function is supplied.
	loader    LoadFunc
	parseFunc func(str string, src interface{}) (*ast.File, error)

	initialized bool

	// imports caches loaded instances by import path so each import is
	// resolved at most once per build.
	imports map[string]*Instance
}
+
// NewInstance creates an instance for this Context rooted at dir.
// A nil receiver is tolerated (a fresh Context is used), and a nil f falls
// back to the Context's default loader.
func (c *Context) NewInstance(dir string, f LoadFunc) *Instance {
	if c == nil {
		c = &Context{}
	}
	if f == nil {
		f = c.loader
	}
	return &Instance{
		ctxt:     c,
		loadFunc: f,
		Dir:      dir,
	}
}
+
// Complete finishes the initialization of an instance. All files must have
// been added with AddFile before this call.
//
// Complete is idempotent: after the first call it returns the recorded
// inst.Err without redoing the work.
func (inst *Instance) Complete() error {
	if inst.done {
		return inst.Err
	}
	inst.done = true

	err := inst.complete()
	if err != nil {
		inst.ReportError(err)
	}
	if inst.Err != nil {
		inst.Incomplete = true
		return inst.Err
	}
	return nil
}
+
+func (c *Context) init() {
+ if !c.initialized {
+ c.initialized = true
+ c.ctxt = context.Background()
+ c.imports = map[string]*Instance{}
+ }
+}
+
+// Options:
+// - certain parse modes
+// - parallellism
+// - error handler (allows cancelling the context)
+// - file set.
+
+// NewContext creates a new build context.
+//
+// All instances must be created with a context.
+func NewContext(opts ...Option) *Context {
+ c := &Context{}
+ for _, o := range opts {
+ o(c)
+ }
+ c.init()
+ return c
+}
+
// An Option defines a build option for configuring a Context.
type Option func(c *Context)

// Loader sets the default load function used to resolve imports.
func Loader(f LoadFunc) Option {
	return func(c *Context) { c.loader = f }
}

// ParseFile is called to read and parse each file
// when building syntax tree.
// It must be safe to call ParseFile simultaneously from multiple goroutines.
// If ParseFile is nil, the loader will uses parser.ParseFile.
//
// ParseFile should parse the source from src and use filename only for
// recording position information.
//
// An application may supply a custom implementation of ParseFile
// to change the effective file contents or the behavior of the parser,
// or to modify the syntax tree. For example, changing the backwards
// compatibility.
func ParseFile(f func(filename string, src interface{}) (*ast.File, error)) Option {
	return func(c *Context) { c.parseFunc = f }
}
diff --git a/vendor/cuelang.org/go/cue/build/doc.go b/vendor/cuelang.org/go/cue/build/doc.go
new file mode 100644
index 0000000000..52421c65d8
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package build defines collections of CUE files to build an instance.
+package build // import "cuelang.org/go/cue/build"
diff --git a/vendor/cuelang.org/go/cue/build/file.go b/vendor/cuelang.org/go/cue/build/file.go
new file mode 100644
index 0000000000..7b22d2eda6
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/file.go
@@ -0,0 +1,86 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import "cuelang.org/go/cue/errors"
+
// A File represents a file that is part of the build process.
type File struct {
	Filename string `json:"filename"`

	Encoding       Encoding          `json:"encoding,omitempty"`
	Interpretation Interpretation    `json:"interpretation,omitempty"`
	Form           Form              `json:"form,omitempty"`
	Tags           map[string]string `json:"tags,omitempty"` // code=go

	// ExcludeReason, if set, records why the file was excluded from a build.
	ExcludeReason errors.Error `json:"-"`
	Source        interface{}  `json:"-"` // TODO: swap out with concrete type.
}
+
// An Encoding indicates a file format for representing a program.
type Encoding string

const (
	CUE         Encoding = "cue"
	JSON        Encoding = "json"
	YAML        Encoding = "yaml"
	JSONL       Encoding = "jsonl"
	Text        Encoding = "text"
	Binary      Encoding = "binary"
	Protobuf    Encoding = "proto"
	TextProto   Encoding = "textproto"
	BinaryProto Encoding = "pb"

	// TODO:
	// TOML

	Code Encoding = "code" // Programming languages
)
+
// An Interpretation determines how a certain program should be interpreted.
// For instance, data may be interpreted as describing a schema, which itself
// can be converted to a CUE schema.
type Interpretation string

const (
	// Auto interprets the underlying data file as data, JSON Schema or OpenAPI,
	// depending on the existence of certain marker fields.
	//
	// JSON Schema is identified by a top-level "$schema" field with a URL
	// of the form "https?://json-schema.org/.*schema#?".
	//
	// OpenAPI is identified by the existence of a top-level field "openapi"
	// with a major semantic version of 3, as well as the existence of
	// the info.title and info.version fields.
	//
	// In all other cases, the underlying data is interpreted as is.
	Auto Interpretation = "auto"
	// JSONSchema interprets the file as a JSON Schema document.
	JSONSchema Interpretation = "jsonschema"
	// OpenAPI interprets the file as an OpenAPI document.
	OpenAPI Interpretation = "openapi"
	// ProtobufJSON interprets the file as protobuf JSON.
	ProtobufJSON Interpretation = "pb"
)
+
// A Form specifies the form in which a program should be represented.
type Form string

const (
	Full   Form = "full"
	Schema Form = "schema"
	Struct Form = "struct"
	Final  Form = "final" // picking default values, may be non-concrete
	Graph  Form = "graph" // Data only, but allow references
	DAG    Form = "dag"   // Like graph, but don't allow cycles
	Data   Form = "data"  // always final
)
diff --git a/vendor/cuelang.org/go/cue/build/import.go b/vendor/cuelang.org/go/cue/build/import.go
new file mode 100644
index 0000000000..996edb0afe
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/import.go
@@ -0,0 +1,170 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+ "sort"
+ "strconv"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
// A LoadFunc loads the instance for the given import path; pos is the
// position at which the path is imported. Returning nil causes the import
// to be skipped.
type LoadFunc func(pos token.Pos, path string) *Instance

// cueError aliases errors.Error for unexported embedding below.
type cueError = errors.Error

// buildError decorates a CUE error with the positions of the imports that
// triggered it.
type buildError struct {
	cueError
	inputs []token.Pos
}

// InputPositions returns the positions of the imports that caused the error.
func (e *buildError) InputPositions() []token.Pos {
	return e.inputs
}
+
// complete resolves the imports of inst: it collects all import paths from
// the instance's files, loads each one via inst.loadFunc (when set, caching
// results on the shared Context), and records the resulting dependency
// names in inst.Deps.
func (inst *Instance) complete() errors.Error {
	// TODO: handle case-insensitive collisions.
	// dir := inst.Dir
	// names := []string{}
	// for _, src := range sources {
	// 	names = append(names, src.path)
	// }
	// f1, f2 := str.FoldDup(names)
	// if f1 != "" {
	// 	return nil, fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2)
	// }

	var (
		c        = inst.ctxt
		imported = map[string][]token.Pos{}
	)

	// Gather every import path mentioned in any file, together with the
	// positions at which it is imported.
	for _, f := range inst.Files {
		for _, decl := range f.Decls {
			d, ok := decl.(*ast.ImportDecl)
			if !ok {
				continue
			}
			for _, spec := range d.Specs {
				quoted := spec.Path.Value
				path, err := strconv.Unquote(quoted)
				if err != nil {
					inst.Err = errors.Append(inst.Err,
						errors.Newf(
							spec.Path.Pos(),
							"%s: parser returned invalid quoted string: <%s>",
							f.Filename, quoted))
				}
				imported[path] = append(imported[path], spec.Pos())
			}
		}
	}

	paths := make([]string, 0, len(imported))
	for path := range imported {
		paths = append(paths, path)
		if path == "" {
			return &buildError{
				errors.Newf(token.NoPos, "empty import path"),
				imported[path],
			}
		}
	}

	sort.Strings(paths)

	// Load each imported instance, consulting the Context's cache first so
	// each path is loaded at most once per build.
	if inst.loadFunc != nil {
		for i, path := range paths {
			isLocal := IsLocalImport(path)
			if isLocal {
				// path = dirToImportPath(filepath.Join(dir, path))
			}

			imp := c.imports[path]
			if imp == nil {
				pos := token.NoPos
				if len(imported[path]) > 0 {
					pos = imported[path][0]
				}
				// A nil result from the loader means: skip this import.
				imp = inst.loadFunc(pos, path)
				if imp == nil {
					continue
				}
				if imp.Err != nil {
					return errors.Wrapf(imp.Err, pos, "import failed")
				}
				imp.ImportPath = path
				// imp.parent = inst
				c.imports[path] = imp
				// imp.parent = nil
			} else if imp.parent != nil {
				// TODO: report a standard cycle message.
				// cycle is now handled explicitly in loader
			}
			paths[i] = imp.ImportPath

			inst.addImport(imp)
			if imp.Incomplete {
				inst.Incomplete = true
			}
		}
	}

	inst.ImportPaths = paths
	inst.ImportPos = imported

	// Build full dependencies. NOTE(review): the enqueueing of indirect
	// imports is commented out below, so only direct imports end up in q.
	deps := make(map[string]*Instance)
	var q []*Instance
	q = append(q, inst.Imports...)
	for i := 0; i < len(q); i++ {
		p1 := q[i]
		path := p1.ImportPath
		// The same import path could produce an error or not,
		// depending on what tries to import it.
		// Prefer to record entries with errors, so we can report them.
		// p0 := deps[path]
		// if err0, err1 := lastError(p0), lastError(p1); p0 == nil || err1 != nil && (err0 == nil || len(err0.ImportStack) > len(err1.ImportStack)) {
		// 	deps[path] = p1
		// 	for _, p2 := range p1.Imports {
		// 		if deps[p2.ImportPath] != p2 {
		// 			q = append(q, p2)
		// 		}
		// 	}
		// }
		if _, ok := deps[path]; !ok {
			deps[path] = p1
		}
	}
	inst.Deps = make([]string, 0, len(deps))
	for dep := range deps {
		inst.Deps = append(inst.Deps, dep)
	}
	sort.Strings(inst.Deps)

	// Surface errors from dependencies on the importing instance.
	for _, dep := range inst.Deps {
		p1 := deps[dep]
		if p1 == nil {
			panic("impossible: missing entry in package cache for " + dep + " imported by " + inst.ImportPath)
		}
		if p1.Err != nil {
			inst.DepsErrors = append(inst.DepsErrors, p1.Err)
		}
	}

	return nil
}
diff --git a/vendor/cuelang.org/go/cue/build/instance.go b/vendor/cuelang.org/go/cue/build/instance.go
new file mode 100644
index 0000000000..cc0abb8ae0
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/instance.go
@@ -0,0 +1,287 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+ "fmt"
+ pathpkg "path"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+)
+
+// An Instance describes the collection of files, and its imports, necessary
+// to build a CUE instance.
+//
+// A typical way to create an Instance is to use the cue/load package.
+type Instance struct {
+ // ctxt is the build Context this instance was created with; see Context().
+ ctxt *Context
+
+ BuildFiles []*File // files to be included in the build
+ IgnoredFiles []*File // files excluded for this build
+ OrphanedFiles []*File // recognized file formats not part of any build
+ InvalidFiles []*File // could not parse these files
+ UnknownFiles []*File // unknown file types
+
+ User bool // True if package was created from individual files.
+
+ // Files contains the AST for all files part of this instance.
+ // TODO: the intent is to deprecate this in favor of BuildFiles.
+ Files []*ast.File
+
+ // loadFunc resolves an import path to its Instance while the loader
+ // completes this instance's imports.
+ loadFunc LoadFunc
+ // done is an internal completion flag (not written within this file —
+ // see the loader).
+ done bool
+
+ // PkgName is the name specified in the package clause.
+ PkgName string
+ // hasName records whether PkgName has been set; see setPkg.
+ hasName bool
+
+ // ImportPath returns the unique path to identify an imported instance.
+ //
+ // Instances created with NewInstance do not have an import path.
+ ImportPath string
+
+ // Imports lists the instances of all direct imports of this instance.
+ Imports []*Instance
+
+ // The Err for loading this package or nil on success. This does not
+ // include any errors of dependencies. Incomplete will be set if there
+ // were any errors in dependencies.
+ Err errors.Error
+
+ parent *Instance // TODO: for cycle detection
+
+ // The following fields are for informative purposes and are not used by
+ // the cue package to create an instance.
+
+ // DisplayPath is a user-friendly version of the package or import path.
+ DisplayPath string
+
+ // Module defines the module name of a package. It must be defined if
+ // the packages within the directory structure of the module are to be
+ // imported by other packages, including those within the module.
+ Module string
+
+ // Root is the root of the directory hierarchy, it may be "" if this an
+ // instance has no imports.
+ // If Module != "", this corresponds to the module root.
+ // Root/pkg is the directory that holds third-party packages.
+ Root string // root directory of hierarchy ("" if unknown)
+
+ // Dir is the package directory. A package may also include files from
+ // ancestor directories, up to the module file.
+ Dir string
+
+ // NOTICE: the below tags may change in the future.
+
+ // ImportComment is the path in the import comment on the package statement.
+ ImportComment string `api:"alpha"`
+
+ // AllTags are the build tags that can influence file selection in this
+ // directory.
+ AllTags []string `api:"alpha"`
+
+ // Incomplete reports whether any dependencies had an error.
+ Incomplete bool `api:"alpha"`
+
+ // Dependencies
+ // ImportPaths gives the transitive dependencies of all imports.
+ ImportPaths []string `api:"alpha"`
+ ImportPos map[string][]token.Pos `api:"alpha"` // line information for Imports
+
+ // Deps holds the sorted transitive dependency paths; DepsErrors collects
+ // the load errors of those dependencies.
+ Deps []string `api:"alpha"`
+ DepsErrors []error `api:"alpha"`
+ Match []string `api:"alpha"`
+}
+
+// RelPath reports the path of f relative to the root of the instance's module
+// directory. The full path is returned if a relative path could not be found.
+func (inst *Instance) RelPath(f *File) string {
+ p, err := filepath.Rel(inst.Root, f.Filename)
+ if err != nil {
+ return f.Filename
+ }
+ return p
+}
+
+// ID returns the package ID unique for this module.
+func (inst *Instance) ID() string {
+ if s := inst.ImportPath; s != "" {
+ return s
+ }
+ if inst.PkgName == "" {
+ // An instance with neither an import path nor a package name gets
+ // the anonymous ID.
+ return "_"
+ }
+ s := fmt.Sprintf("%s:%s", inst.Module, inst.PkgName)
+ return s
+}
+
+// Dependencies reports all Instances on which this instance depends.
+func (inst *Instance) Dependencies() []*Instance {
+ // TODO: as cyclic dependencies are not allowed, we could just not check.
+ // Do for safety now and remove later if needed.
+ return appendDependencies(nil, inst, map[*Instance]bool{})
+}
+
+// appendDependencies appends to a the transitive imports of inst in
+// depth-first pre-order, using done to visit each Instance at most once.
+func appendDependencies(a []*Instance, inst *Instance, done map[*Instance]bool) []*Instance {
+ for _, d := range inst.Imports {
+ if done[d] {
+ continue
+ }
+ a = append(a, d)
+ done[d] = true
+ a = appendDependencies(a, d, done)
+ }
+ return a
+}
+
+// Abs converts relative path used in the one of the file fields to an
+// absolute one.
+func (inst *Instance) Abs(path string) string {
+ if filepath.IsAbs(path) {
+ return path
+ }
+ return filepath.Join(inst.Root, path)
+}
+
+// setPkg records pkg as the instance's package name. It reports false if a
+// name was already set, in which case PkgName is left unchanged.
+func (inst *Instance) setPkg(pkg string) bool {
+ if !inst.hasName {
+ inst.hasName = true
+ inst.PkgName = pkg
+ return true
+ }
+ return false
+}
+
+// ReportError reports an error processing this instance.
+func (inst *Instance) ReportError(err errors.Error) {
+ inst.Err = errors.Append(inst.Err, err)
+}
+
+// Context defines the build context for this instance. All files defined
+// in Syntax as well as all imported instances must be created using the
+// same build context.
+func (inst *Instance) Context() *Context {
+ return inst.ctxt
+}
+
+// parse parses a CUE file, preferring the build Context's custom parse
+// function when one is configured.
+func (inst *Instance) parse(name string, src interface{}) (*ast.File, error) {
+ if inst.ctxt != nil && inst.ctxt.parseFunc != nil {
+ return inst.ctxt.parseFunc(name, src)
+ }
+ return parser.ParseFile(name, src, parser.ParseComments)
+}
+
+// LookupImport defines a mapping from an ImportSpec's ImportPath to Instance.
+func (inst *Instance) LookupImport(path string) *Instance {
+ // Local ("./...") paths are first normalized to pseudo-import paths.
+ path = inst.expandPath(path)
+ for _, inst := range inst.Imports {
+ if inst.ImportPath == path {
+ return inst
+ }
+ }
+ return nil
+}
+
+// addImport appends imp to inst.Imports, ignoring an exact duplicate. It
+// panics if a different Instance was already added for the same import path.
+func (inst *Instance) addImport(imp *Instance) {
+ for _, inst := range inst.Imports {
+ if inst.ImportPath == imp.ImportPath {
+ if inst != imp {
+ panic("import added multiple times with different instances")
+ }
+ return
+ }
+ }
+ inst.Imports = append(inst.Imports, imp)
+}
+
+// AddFile adds the file with the given name to the list of files for this
+// instance. The file may be loaded from the cache of the instance's context.
+// It does not process the file's imports. The package name of the file must
+// match the package name of the instance.
+//
+// Deprecated: use AddSyntax or wait for this to be renamed using a new
+// signature.
+func (inst *Instance) AddFile(filename string, src interface{}) error {
+ file, err := inst.parse(filename, src)
+ if err != nil {
+ // should always be an errors.List, but just in case.
+ err := errors.Promote(err, "error adding file")
+ inst.ReportError(err)
+ return err
+ }
+
+ return inst.AddSyntax(file)
+}
+
+// AddSyntax adds the given file to list of files for this instance. The package
+// name of the file must match the package name of the instance.
+func (inst *Instance) AddSyntax(file *ast.File) errors.Error {
+ astutil.Resolve(file, func(pos token.Pos, msg string, args ...interface{}) {
+ inst.Err = errors.Append(inst.Err, errors.Newf(pos, msg, args...))
+ })
+ _, pkg, pos := internal.PackageInfo(file)
+ // Only a named, non-anonymous package can conflict with a previously
+ // recorded package name; the first such name wins via setPkg.
+ if pkg != "" && pkg != "_" && !inst.setPkg(pkg) && pkg != inst.PkgName {
+ err := errors.Newf(pos,
+ "package name %q conflicts with previous package name %q",
+ pkg, inst.PkgName)
+ inst.ReportError(err)
+ return err
+ }
+ inst.Files = append(inst.Files, file)
+ return nil
+}
+
+// expandPath converts a local (relative) import path into the pseudo-import
+// path rooted at inst.Dir; all other paths are returned unchanged.
+func (inst *Instance) expandPath(path string) string {
+ isLocal := IsLocalImport(path)
+ if isLocal {
+ path = dirToImportPath(filepath.Join(inst.Dir, path))
+ }
+ return path
+}
+
+// dirToImportPath returns the pseudo-import path we use for a package
+// outside the CUE path. It begins with _/ and then contains the full path
+// to the directory. If the package lives in c:\home\gopher\my\pkg then
+// the pseudo-import path is _/c_/home/gopher/my/pkg.
+// Using a pseudo-import path like this makes the ./ imports no longer
+// a special case, so that all the code to deal with ordinary imports works
+// automatically.
+func dirToImportPath(dir string) string {
+ return pathpkg.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir)))
+}
+
+// makeImportValid maps runes that are not allowed in import paths to '_',
+// leaving all other runes unchanged.
+func makeImportValid(r rune) rune {
+ // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport.
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return '_'
+ }
+ return r
+}
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+ return path == "." || path == ".." ||
+ strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
+}
diff --git a/vendor/cuelang.org/go/cue/builtin.go b/vendor/cuelang.org/go/cue/builtin.go
new file mode 100644
index 0000000000..74aa56bbf7
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/builtin.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// pos reports the source position of n, or the zero token.Pos if n is nil or
+// has no source node attached.
+func pos(n adt.Node) (p token.Pos) {
+ if n == nil {
+ return
+ }
+ src := n.Source()
+ if src == nil {
+ return
+ }
+ return src.Pos()
+}
diff --git a/vendor/cuelang.org/go/cue/builtinutil.go b/vendor/cuelang.org/go/cue/builtinutil.go
new file mode 100644
index 0000000000..2bfd8adf74
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/builtinutil.go
@@ -0,0 +1,45 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+// TODO: this code could be generated, but currently isn't.
+
+// valueSorter implements sort.Interface over a, ordering elements with the
+// user-supplied CUE comparator cmp.
+type valueSorter struct {
+ a []Value // values being sorted (modified in place)
+ cmp Value // CUE comparator; filled with "x" and "y", read via "less"
+ err error // first error encountered by Less, if any
+}
+
+// ret returns the sorted slice, or the first comparison error if one occurred.
+func (s *valueSorter) ret() ([]Value, error) {
+ if s.err != nil {
+ return nil, s.err
+ }
+ // The input slice is already a copy and that we can modify it safely.
+ return s.a, nil
+}
+
+func (s *valueSorter) Len() int { return len(s.a) }
+func (s *valueSorter) Swap(i, j int) { s.a[i], s.a[j] = s.a[j], s.a[i] }
+func (s *valueSorter) Less(i, j int) bool {
+ // Bind the two operands to the comparator's "x" and "y" fields; the
+ // resulting value's "less" field decides the order.
+ v := s.cmp.Fill(s.a[i], "x")
+ v = v.Fill(s.a[j], "y")
+
+ isLess, err := v.Lookup("less").Bool()
+ if err != nil && s.err == nil {
+ // Record only the first error; ret discards the result in that case.
+ s.err = err
+ return true
+ }
+ return isLess
+}
diff --git a/vendor/cuelang.org/go/cue/context.go b/vendor/cuelang.org/go/cue/context.go
new file mode 100644
index 0000000000..3da628f581
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/context.go
@@ -0,0 +1,473 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/compile"
+ "cuelang.org/go/internal/core/convert"
+ "cuelang.org/go/internal/core/debug"
+ "cuelang.org/go/internal/core/eval"
+ "cuelang.org/go/internal/core/runtime"
+)
+
+// A Context is used for creating CUE Values.
+//
+// A Context keeps track of loaded instances, indices of internal
+// representations of values, and defines the set of supported builtins. Any
+// operation that involves two Values should originate from the same Context.
+//
+// Use
+//
+// ctx := cuecontext.New()
+//
+// to create a new Context.
+type Context runtime.Runtime
+
+// runtime gives access to the underlying runtime.Runtime representation of c.
+func (c *Context) runtime() *runtime.Runtime {
+ rt := (*runtime.Runtime)(c)
+ return rt
+}
+
+// ctx returns a fresh evaluation context backed by c's runtime.
+func (c *Context) ctx() *adt.OpContext {
+ return newContext(c.runtime())
+}
+
+// Context reports the Context with which this value was created.
+func (v Value) Context() *Context {
+ return (*Context)(v.idx)
+}
+
+// A BuildOption defines options for the various build-related methods of
+// Context.
+type BuildOption func(o *runtime.Config)
+
+// Scope defines a context in which to resolve unresolved identifiers.
+//
+// Only one scope may be given. It panics if more than one scope is given
+// or if the Context in which scope was created differs from the one where
+// this option is used.
+func Scope(scope Value) BuildOption {
+ return func(o *runtime.Config) {
+ if o.Runtime != scope.idx {
+ panic("incompatible runtime")
+ }
+ if o.Scope != nil {
+ panic("more than one scope is given")
+ }
+ o.Scope = valueScope(scope)
+ }
+}
+
+// Filename assigns a filename to parsed content.
+func Filename(filename string) BuildOption {
+ return func(o *runtime.Config) { o.Filename = filename }
+}
+
+// ImportPath defines the import path to use for building CUE. The import path
+// influences the scope in which identifiers occurring in the input CUE are
+// defined. Passing the empty string is equal to not specifying this option.
+//
+// This option is typically not necessary when building using a build.Instance,
+// but takes precedence otherwise.
+func ImportPath(path string) BuildOption {
+ return func(o *runtime.Config) { o.ImportPath = path }
+}
+
+// InferBuiltins allows unresolved references to bind to builtin packages with a
+// unique package name.
+//
+// This option is intended for evaluating expressions in a context where import
+// statements cannot be used. It is not recommended to use this for evaluating
+// CUE files.
+func InferBuiltins(elide bool) BuildOption {
+ // NOTE(review): the elide parameter is currently unused by this
+ // implementation — confirm intended behavior against upstream.
+ return func(o *runtime.Config) {
+ o.Imports = func(x *ast.Ident) (pkgPath string) {
+ return o.Runtime.BuiltinPackagePath(x.Name)
+ }
+ }
+}
+
+// parseOptions applies the given BuildOptions to a fresh runtime.Config
+// bound to c's runtime.
+func (c *Context) parseOptions(options []BuildOption) (cfg runtime.Config) {
+ cfg.Runtime = (*runtime.Runtime)(c)
+ for _, f := range options {
+ f(&cfg)
+ }
+ return cfg
+}
+
+// BuildInstance creates a Value from the given build.Instance.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) BuildInstance(i *build.Instance, options ...BuildOption) Value {
+ cfg := c.parseOptions(options)
+ v, err := c.runtime().Build(&cfg, i)
+ if err != nil {
+ return c.makeError(err)
+ }
+ return c.make(v)
+}
+
+// makeError wraps err in a finalized bottom (error) vertex and returns it as
+// a Value.
+func (c *Context) makeError(err errors.Error) Value {
+ b := &adt.Bottom{Err: err}
+ node := &adt.Vertex{BaseValue: b}
+ node.UpdateStatus(adt.Finalized)
+ node.AddConjunct(adt.MakeRootConjunct(nil, b))
+ return c.make(node)
+}
+
+// BuildInstances creates a Value for each of the given instances and reports
+// the combined errors or nil if there were no errors.
+func (c *Context) BuildInstances(instances []*build.Instance) ([]Value, error) {
+ var errs errors.Error
+ var a []Value
+ for _, b := range instances {
+ v, err := c.runtime().Build(nil, b)
+ if err != nil {
+ errs = errors.Append(errs, err)
+ // Append an error-valued placeholder so the result stays
+ // index-aligned with instances.
+ a = append(a, c.makeError(err))
+ } else {
+ a = append(a, c.make(v))
+ }
+ }
+ return a, errs
+}
+
+// BuildFile creates a Value from f.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) BuildFile(f *ast.File, options ...BuildOption) Value {
+ cfg := c.parseOptions(options)
+ return c.compile(c.runtime().CompileFile(&cfg, f))
+}
+
+// compile converts a built vertex to a Value, reporting p's build error
+// instead if one occurred.
+func (c *Context) compile(v *adt.Vertex, p *build.Instance) Value {
+ if p.Err != nil {
+ return c.makeError(p.Err)
+ }
+ return c.make(v)
+}
+
+// BuildExpr creates a Value from x.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) BuildExpr(x ast.Expr, options ...BuildOption) Value {
+ r := c.runtime()
+ cfg := c.parseOptions(options)
+
+ ctx := c.ctx()
+
+ // TODO: move to runtime?: it probably does not make sense to treat BuildExpr
+ // and the expression resulting from CompileString differently.
+ astutil.ResolveExpr(x, errFn)
+
+ pkgPath := cfg.ImportPath
+ if pkgPath == "" {
+ pkgPath = anonymousPkg
+ }
+
+ conjunct, err := compile.Expr(&cfg.Config, r, pkgPath, x)
+ if err != nil {
+ return c.makeError(err)
+ }
+ v := adt.Resolve(ctx, conjunct)
+
+ return c.make(v)
+}
+
+// errFn is a no-op handler for identifier-resolution errors.
+func errFn(pos token.Pos, msg string, args ...interface{}) {}
+
+// resolveExpr binds unresolved expressions to values in the expression or v.
+func resolveExpr(ctx *adt.OpContext, v Value, x ast.Expr) adt.Value {
+ cfg := &compile.Config{Scope: valueScope(v)}
+
+ astutil.ResolveExpr(x, errFn)
+
+ c, err := compile.Expr(cfg, ctx, anonymousPkg, x)
+ if err != nil {
+ return &adt.Bottom{Err: err}
+ }
+ return adt.Resolve(ctx, c)
+}
+
+// anonymousPkg reports a package path that can never resolve to a valid package.
+const anonymousPkg = "_"
+
+// CompileString parses and builds a Value from the given source string.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) CompileString(src string, options ...BuildOption) Value {
+ cfg := c.parseOptions(options)
+ return c.compile(c.runtime().Compile(&cfg, src))
+}
+
+// CompileBytes parses and builds a Value from the given source bytes.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) CompileBytes(b []byte, options ...BuildOption) Value {
+ cfg := c.parseOptions(options)
+ return c.compile(c.runtime().Compile(&cfg, b))
+}
+
+// TODO: fs.FS or custom wrapper?
+// // CompileFile parses and build a Value from the given source bytes.
+// //
+// // The returned Value will represent an error, accessible through Err, if any
+// // error occurred.
+// func (c *Context) CompileFile(f fs.File, options ...BuildOption) Value {
+// b, err := io.ReadAll(f)
+// if err != nil {
+// return c.makeError(errors.Promote(err, "parsing file system file"))
+// }
+// return c.compile(c.runtime().Compile("", b))
+// }
+
+// make wraps the vertex v in a Value rooted at c's runtime.
+func (c *Context) make(v *adt.Vertex) Value {
+ return newValueRoot(c.runtime(), newContext(c.runtime()), v)
+}
+
+// An EncodeOption defines options for the various encoding-related methods of
+// Context.
+type EncodeOption func(*encodeOptions)
+
+// encodeOptions collects the settings applied by EncodeOptions.
+type encodeOptions struct {
+ nilIsTop bool // interpret Go nil as top (_) rather than null
+}
+
+// process applies each EncodeOption to o in order.
+func (o *encodeOptions) process(option []EncodeOption) {
+ for _, f := range option {
+ f(o)
+ }
+}
+
+// NilIsAny indicates whether a nil value is interpreted as null or _.
+//
+// The default is to interpret nil as _.
+func NilIsAny(isAny bool) EncodeOption {
+ return func(o *encodeOptions) { o.nilIsTop = isAny }
+}
+
+// Encode converts a Go value to a CUE value.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+//
+// Encode traverses the value v recursively. If an encountered value implements
+// the json.Marshaler interface and is not a nil pointer, Encode calls its
+// MarshalJSON method to produce JSON and convert that to CUE instead. If no
+// MarshalJSON method is present but the value implements encoding.TextMarshaler
+// instead, Encode calls its MarshalText method and encodes the result as a
+// string.
+//
+// Otherwise, Encode uses the following type-dependent default encodings:
+//
+// Boolean values encode as CUE booleans.
+//
+// Floating point, integer, and *big.Int and *big.Float values encode as CUE
+// numbers.
+//
+// String values encode as CUE strings coerced to valid UTF-8, replacing
+// sequences of invalid bytes with the Unicode replacement rune as per Unicode's
+// and W3C's recommendation.
+//
+// Array and slice values encode as CUE lists, except that []byte encodes as a
+// bytes value, and a nil slice encodes as the null.
+//
+// Struct values encode as CUE structs. Each exported struct field becomes a
+// member of the object, using the field name as the object key, unless the
+// field is omitted for one of the reasons given below.
+//
+// The encoding of each struct field can be customized by the format string
+// stored under the "json" key in the struct field's tag. The format string
+// gives the name of the field, possibly followed by a comma-separated list of
+// options. The name may be empty in order to specify options without overriding
+// the default field name.
+//
+// The "omitempty" option specifies that the field should be omitted from the
+// encoding if the field has an empty value, defined as false, 0, a nil pointer,
+// a nil interface value, and any empty array, slice, map, or string.
+//
+// See the documentation for Go's json.Marshal for more details on the field
+// tags and their meaning.
+//
+// Anonymous struct fields are usually encoded as if their inner exported
+// fields were fields in the outer struct, subject to the usual Go visibility
+// rules amended as described in the next paragraph. An anonymous struct field
+// with a name given in its JSON tag is treated as having that name, rather than
+// being anonymous. An anonymous struct field of interface type is treated the
+// same as having that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for when deciding which
+// field to encode or decode. If there are multiple fields at the same level,
+// and that level is the least nested (and would therefore be the nesting level
+// selected by the usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are
+// considered, even if there are multiple untagged fields that would otherwise
+// conflict.
+//
+// 2) If there is exactly one field (tagged or not according to the first rule),
+// that is selected.
+//
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Map values encode as CUE structs. The map's key type must either be a string,
+// an integer type, or implement encoding.TextMarshaler. The map keys are sorted
+// and used as CUE struct field names by applying the following rules, subject
+// to the UTF-8 coercion described for string values above:
+//
+// - keys of any string type are used directly
+// - encoding.TextMarshalers are marshaled
+// - integer keys are converted to strings
+//
+// Pointer values encode as the value pointed to. A nil pointer encodes as the
+// null CUE value.
+//
+// Interface values encode as the value contained in the interface. A nil
+// interface value encodes as the null CUE value. The NilIsAny EncodingOption
+// can be used to interpret nil as any (_) instead.
+//
+// Channel, complex, and function values cannot be encoded in CUE. Attempting to
+// encode such a value results in the returned value being an error, accessible
+// through the Err method.
+//
+func (c *Context) Encode(x interface{}, option ...EncodeOption) Value {
+ switch v := x.(type) {
+ case adt.Value:
+ // Already an internal value; wrap it directly without conversion.
+ return newValueRoot(c.runtime(), c.ctx(), v)
+ }
+ var options encodeOptions
+ options.process(option)
+
+ ctx := c.ctx()
+ // TODO: is true the right default?
+ expr := convert.GoValueToValue(ctx, x, options.nilIsTop)
+ n := &adt.Vertex{}
+ n.AddConjunct(adt.MakeRootConjunct(nil, expr))
+ n.Finalize(ctx)
+ return c.make(n)
+}
+
+// EncodeType converts a Go type to a CUE value.
+//
+// The returned Value will represent an error, accessible through Err, if any
+// error occurred.
+func (c *Context) EncodeType(x interface{}, option ...EncodeOption) Value {
+ switch v := x.(type) {
+ case *adt.Vertex:
+ // Already an internal vertex; wrap it directly without conversion.
+ return c.make(v)
+ }
+
+ ctx := c.ctx()
+ expr, err := convert.GoTypeToExpr(ctx, x)
+ if err != nil {
+ return c.makeError(err)
+ }
+ n := &adt.Vertex{}
+ n.AddConjunct(adt.MakeRootConjunct(nil, expr))
+ n.Finalize(ctx)
+ return c.make(n)
+}
+
+// NewList creates a Value that is a list of the given values.
+//
+// All Values must be created by c.
+func (c *Context) NewList(v ...Value) Value {
+ a := make([]adt.Value, len(v))
+ for i, x := range v {
+ if x.idx != (*runtime.Runtime)(c) {
+ panic("values must be from same Context")
+ }
+ a[i] = x.v
+ }
+ return c.make(c.ctx().NewList(a...))
+}
+
+// TODO:
+
+// func (c *Context) NewExpr(op Op, v ...Value) Value {
+// return Value{}
+// }
+
+// func (c *Context) NewValue(v ...ValueElem) Value {
+// return Value{}
+// }
+
+// func NewAttr(key string, values ...string) *Attribute {
+// return &Attribute{}
+// }
+
+// // Clear unloads all previously-loaded imports.
+// func (c *Context) Clear() {
+// }
+
+// // Values created up to the point of the Fork will be valid in both runtimes.
+// func (c *Context) Fork() *Context {
+// return nil
+// }
+
+// type ValueElem interface {
+// }
+
+// func NewField(sel Selector, value Value, attrs ...Attribute) ValueElem {
+// return nil
+// }
+
+// func NewDocComment(text string) ValueElem {
+// return nil
+// }
+
+// newContext returns a new evaluation context.
+func newContext(idx *runtime.Runtime) *adt.OpContext {
+ if idx == nil {
+ return nil
+ }
+ return eval.NewContext(idx, nil)
+}
+
+// debugStr renders v as a string for debugging purposes.
+func debugStr(ctx *adt.OpContext, v adt.Node) string {
+ return debug.NodeString(ctx, v, nil)
+}
+
+// str is shorthand for debugStr.
+func str(c *adt.OpContext, v adt.Node) string {
+ return debugStr(c, v)
+}
+
+// eval returns the evaluated value. This may not be the vertex.
+//
+// Deprecated: use ctx.value
+func (v Value) eval(ctx *adt.OpContext) adt.Value {
+ if v.v == nil {
+ panic("undefined value")
+ }
+ x := manifest(ctx, v.v)
+ return x.Value()
+}
+
+// manifest finalizes v in ctx and returns it.
+// TODO: change from Vertex to Vertex.
+func manifest(ctx *adt.OpContext, v *adt.Vertex) *adt.Vertex {
+ v.Finalize(ctx)
+ return v
+}
diff --git a/vendor/cuelang.org/go/cue/cue.go b/vendor/cuelang.org/go/cue/cue.go
new file mode 100644
index 0000000000..6f9622f644
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/cue.go
@@ -0,0 +1,43 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cue is the main API for CUE evaluation.
+//
+// Value is the main type that represents CUE evaluations. Values are created
+// with a cue.Context. Only values created from the same Context can be
+// involved in the same operation.
+//
+// A Context defines the set of active packages, the translations of field
+// names to unique codes, as well as the set of builtins. Use
+//
+// import "cuelang.org/go/cue/cuecontext"
+//
+// ctx := cuecontext.New()
+//
+// to obtain a context.
+//
+//
+// Note that the following types are DEPRECATED and their usage should be
+// avoided if possible:
+//
+// FieldInfo
+// Instance
+// Runtime
+// Struct
+//
+// Many types also have deprecated methods. Code that already uses deprecated
+// methods can keep using them for at least some time. We aim to provide a
+// go or cue fix solution to automatically rewrite code using the new API.
+//
+package cue
diff --git a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go
new file mode 100644
index 0000000000..06080793e3
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go
@@ -0,0 +1,31 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cuecontext
+
+import (
+ "cuelang.org/go/cue"
+ "cuelang.org/go/internal/core/runtime"
+
+ _ "cuelang.org/go/pkg"
+)
+
+// Option controls a build context.
+type Option interface{ buildOption() }
+
+// New creates a new Context.
+func New(options ...Option) *cue.Context {
+ // NOTE(review): options are accepted but not currently applied — Option
+ // has no implementations in this file; confirm against upstream.
+ r := runtime.New()
+ return (*cue.Context)(r)
+}
diff --git a/vendor/cuelang.org/go/cue/decode.go b/vendor/cuelang.org/go/cue/decode.go
new file mode 100644
index 0000000000..f4adc5bd11
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/decode.go
@@ -0,0 +1,943 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// Decode initializes x with Value v. If x is a struct, it will validate the
+// constraints specified in the field tags.
+//
+// x must be a settable value or a pointer to one; otherwise an error is
+// recorded. The returned error is nil if no conversion error occurred.
+func (v Value) Decode(x interface{}) error {
+	var d decoder
+	w := reflect.ValueOf(x)
+	switch {
+	case !reflect.Indirect(w).CanSet():
+		d.addErr(errors.Newf(v.Pos(), "cannot decode into unsettable value"))
+
+	default:
+		if w.Kind() == reflect.Ptr {
+			w = w.Elem()
+		}
+		d.decode(w, v, false)
+	}
+	return d.errs
+}
+
+// decoder accumulates all errors encountered while decoding a Value into a
+// Go value. errs remains nil until the first error is recorded via addErr.
+type decoder struct {
+	errs errors.Error
+}
+
+// addErr records err on the decoder; nil errors are ignored so callers can
+// pass through results unconditionally.
+func (d *decoder) addErr(err error) {
+	if err != nil {
+		d.errs = errors.Append(d.errs, errors.Promote(err, ""))
+	}
+}
+
+// incompleteError returns the error reported when v is not concrete enough
+// to be converted to a Go value.
+func incompleteError(v Value) errors.Error {
+	return &valueError{
+		v: v,
+		err: &adt.Bottom{
+			Code: adt.IncompleteError,
+			Err: errors.Newf(v.Pos(),
+				"cannot convert non-concrete value %v", v)},
+	}
+}
+
+// clear resets x to the zero value of its type, if x is settable.
+func (d *decoder) clear(x reflect.Value) {
+	if x.CanSet() {
+		x.Set(reflect.Zero(x.Type()))
+	}
+}
+
+// decode stores the contents of v into the Go value x, dispatching on x's
+// reflect.Kind. Conversion errors are accumulated on d rather than aborting
+// the walk.
+//
+// NOTE(review): the isPtr parameter is not read anywhere in this body; it is
+// passed as true only from the reflect.Ptr case below.
+func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) {
+	if !x.IsValid() {
+		d.addErr(errors.Newf(v.Pos(), "cannot decode into invalid value"))
+		return
+	}
+
+	// Resolve defaults first; a nil underlying value zeroes the target.
+	v, _ = v.Default()
+	if v.v == nil {
+		d.clear(x)
+		return
+	}
+
+	if err := v.Err(); err != nil {
+		d.addErr(err)
+		return
+	}
+
+	switch x.Kind() {
+	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface:
+		// nullable types: a null or non-concrete CUE value maps to the
+		// Go zero value (nil) rather than being an error.
+		if v.Null() == nil || !v.IsConcrete() {
+			d.clear(x)
+			return
+		}
+
+	default:
+		// TODO: allow incomplete values.
+		if !v.IsConcrete() {
+			d.addErr(incompleteError(v))
+			return
+		}
+	}
+
+	// indirect allocates pointers as needed and detects custom
+	// json.Unmarshaler / encoding.TextUnmarshaler implementations.
+	ij, it, x := indirect(x, v.Null() == nil)
+
+	if ij != nil {
+		b, err := v.marshalJSON()
+		d.addErr(err)
+		d.addErr(ij.UnmarshalJSON(b))
+		return
+	}
+
+	if it != nil {
+		b, err := v.Bytes()
+		if err != nil {
+			err = errors.Wrapf(err, v.Pos(), "Decode")
+			d.addErr(err)
+			return
+		}
+		d.addErr(it.UnmarshalText(b))
+		return
+	}
+
+	kind := x.Kind()
+
+	if kind == reflect.Interface {
+		value := d.interfaceValue(v)
+		x.Set(reflect.ValueOf(value))
+		return
+	}
+
+	switch kind {
+	case reflect.Ptr:
+		d.decode(x.Elem(), v, true)
+
+	case reflect.Bool:
+		b, err := v.Bool()
+		d.addErr(err)
+		x.SetBool(b)
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		i, err := v.Int64()
+		d.addErr(err)
+		if x.OverflowInt(i) {
+			d.addErr(errors.Newf(v.Pos(), "integer %d overflows %s", i, kind))
+			break
+		}
+		x.SetInt(i)
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		i, err := v.Uint64()
+		d.addErr(err)
+		if x.OverflowUint(i) {
+			d.addErr(errors.Newf(v.Pos(), "integer %d overflows %s", i, kind))
+			break
+		}
+		x.SetUint(i)
+
+	case reflect.Float32, reflect.Float64:
+		f, err := v.Float64()
+		d.addErr(err)
+		if x.OverflowFloat(f) {
+			d.addErr(errors.Newf(v.Pos(), "float %g overflows %s", f, kind))
+			break
+		}
+		x.SetFloat(f)
+
+	case reflect.String:
+		s, err := v.String()
+		d.addErr(err)
+		x.SetString(s)
+
+	case reflect.Array:
+		d.clear(x)
+
+		t := x.Type()
+		n := x.Len()
+
+		// Special case: a CUE bytes value into a fixed [N]byte array.
+		// Excess source bytes are silently dropped.
+		if t.Elem().Kind() == reflect.Uint8 && v.Kind() == BytesKind {
+			b, err := v.Bytes()
+			d.addErr(err)
+			for i, c := range b {
+				if i >= n {
+					break
+				}
+				x.Index(i).SetUint(uint64(c))
+			}
+			break
+		}
+
+		var a []Value
+		list, err := v.List()
+		d.addErr(err)
+		for list.Next() {
+			a = append(a, list.Value())
+		}
+
+		// Elements beyond the array length are silently dropped.
+		for i, v := range a {
+			if i >= n {
+				break
+			}
+			d.decode(x.Index(i), v, false)
+		}
+
+	case reflect.Slice:
+		t := x.Type()
+		if t.Elem().Kind() == reflect.Uint8 && v.Kind() == BytesKind {
+			b, err := v.Bytes()
+			d.addErr(err)
+			x.SetBytes(b)
+			break
+		}
+
+		var a []Value
+		list, err := v.List()
+		d.addErr(err)
+		for list.Next() {
+			a = append(a, list.Value())
+		}
+
+		switch cap := x.Cap(); {
+		case cap == 0, // force a non-nil list
+			cap < len(a):
+			x.Set(reflect.MakeSlice(t, len(a), len(a)))
+
+		default:
+			x.SetLen(len(a))
+		}
+
+		for i, v := range a {
+			d.decode(x.Index(i), v, false)
+		}
+
+	case reflect.Struct:
+		d.convertStruct(x, v)
+
+	case reflect.Map:
+		d.convertMap(x, v)
+
+	default:
+		d.clear(x)
+	}
+}
+
+// interfaceValue converts v to a plain Go value for storing in an
+// interface{}: nil, bool, int/big int, float64, string, []byte,
+// []interface{}, or map[string]interface{}, depending on v's kind.
+// Errors are accumulated on d and the best-effort value is returned.
+func (d *decoder) interfaceValue(v Value) (x interface{}) {
+	var err error
+	v, _ = v.Default()
+	switch v.Kind() {
+	case NullKind:
+		return nil
+
+	case BoolKind:
+		x, err = v.Bool()
+
+	case IntKind:
+		// Prefer a plain int when the value fits in 64 bits.
+		if i, err := v.Int64(); err == nil {
+			return int(i)
+		}
+		x, err = v.Int(nil)
+
+	case FloatKind:
+		x, err = v.Float64() // or big int or
+
+	case StringKind:
+		x, err = v.String()
+
+	case BytesKind:
+		x, err = v.Bytes()
+
+	case ListKind:
+		var a []interface{}
+		list, err := v.List()
+		d.addErr(err)
+		for list.Next() {
+			a = append(a, d.interfaceValue(list.Value()))
+		}
+		x = a
+
+	case StructKind:
+		m := map[string]interface{}{}
+		iter, err := v.Fields()
+		d.addErr(err)
+		for iter.Next() {
+			m[iter.Label()] = d.interfaceValue(iter.Value())
+		}
+		x = m
+
+	default:
+		err = incompleteError(v)
+	}
+
+	d.addErr(err)
+	return x
+}
+
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+// convertMap keeps an existing map and overwrites any entry found in v,
+// keeping other preexisting entries.
+func (d *decoder) convertMap(x reflect.Value, v Value) {
+	// NOTE(review): no entries are deleted here (the doc comment above is
+	// authoritative); t is simply the Go map type being populated.
+	t := x.Type()
+
+	// Map key must either have string kind, have an integer kind,
+	// or be an encoding.TextUnmarshaler.
+	switch t.Key().Kind() {
+	case reflect.String,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+	default:
+		if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.addErr(errors.Newf(v.Pos(), "unsupported key type %v", t.Key()))
+			return
+		}
+	}
+
+	if x.IsNil() {
+		x.Set(reflect.MakeMap(t))
+	}
+
+	// mapElem is reused across iterations to avoid reallocating.
+	var mapElem reflect.Value
+
+	iter, err := v.Fields()
+	d.addErr(err)
+	for iter.Next() {
+		key := iter.Label()
+
+		// Convert the CUE field label to a value of the map's key type.
+		var kv reflect.Value
+		kt := t.Key()
+		switch {
+		case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+			kv = reflect.New(kt)
+			err := kv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(key))
+			d.addErr(err)
+			kv = kv.Elem()
+
+		case kt.Kind() == reflect.String:
+			kv = reflect.ValueOf(key).Convert(kt)
+
+		default:
+			switch kt.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				s := string(key)
+				n, err := strconv.ParseInt(s, 10, 64)
+				d.addErr(err)
+				if reflect.Zero(kt).OverflowInt(n) {
+					d.addErr(errors.Newf(v.Pos(), "key integer %d overflows %s", n, kt))
+					break
+				}
+				kv = reflect.ValueOf(n).Convert(kt)
+
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				s := string(key)
+				n, err := strconv.ParseUint(s, 10, 64)
+				d.addErr(err)
+				if reflect.Zero(kt).OverflowUint(n) {
+					d.addErr(errors.Newf(v.Pos(), "key integer %d overflows %s", n, kt))
+					break
+				}
+				kv = reflect.ValueOf(n).Convert(kt)
+
+			default:
+				// Unreachable: the kind check at the top of this
+				// function already rejected other key types.
+				panic("json: Unexpected key type") // should never occur
+			}
+		}
+
+		elemType := t.Elem()
+		if !mapElem.IsValid() {
+			mapElem = reflect.New(elemType).Elem()
+		} else {
+			mapElem.Set(reflect.Zero(elemType))
+		}
+		d.decode(mapElem, iter.Value(), false)
+
+		// kv may be invalid if key conversion failed above.
+		if kv.IsValid() {
+			x.SetMapIndex(kv, mapElem)
+		}
+	}
+}
+
+// convertStruct decodes the fields of struct value v into the Go struct x,
+// matching CUE labels to struct fields via the cached json-tag metadata.
+// Unmatched CUE fields are silently skipped.
+func (d *decoder) convertStruct(x reflect.Value, v Value) {
+	t := x.Type()
+	fields := cachedTypeFields(t)
+
+	iter, err := v.Fields()
+	d.addErr(err)
+	for iter.Next() {
+
+		var f *goField
+		key := iter.Label()
+		if i, ok := fields.nameIndex[key]; ok {
+			// Found an exact name match.
+			f = &fields.list[i]
+		} else {
+			// Fall back to the expensive case-insensitive
+			// linear search.
+			key := []byte(key)
+			for i := range fields.list {
+				ff := &fields.list[i]
+				if ff.equalFold(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+			}
+		}
+
+		if f == nil {
+			continue
+		}
+
+		// Figure out field corresponding to key.
+		subv := x
+		for _, i := range f.index {
+			if subv.Kind() == reflect.Ptr {
+				if subv.IsNil() {
+					// If a struct embeds a pointer to an unexported type,
+					// it is not possible to set a newly allocated value
+					// since the field is unexported.
+					//
+					// See https://golang.org/issue/21357
+					if !subv.CanSet() {
+						d.addErr(errors.Newf(v.Pos(),
+							"cannot set embedded pointer to unexported struct: %v",
+							subv.Type().Elem()))
+						subv = reflect.Value{}
+						break
+					}
+					subv.Set(reflect.New(subv.Type().Elem()))
+				}
+				subv = subv.Elem()
+			}
+			subv = subv.Field(i)
+		}
+
+		// TODO: make this an option
+		// else if d.disallowUnknownFields {
+		// 	d.saveError(fmt.Errorf("json: unknown field %q", key))
+		// }
+
+		d.decode(subv, iter.Value(), false)
+	}
+}
+
+// structFields holds the decodable fields of a struct type, plus an index
+// from field name to position in list for exact-match lookups.
+type structFields struct {
+	list      []goField
+	nameIndex map[string]int
+}
+
+// isValidTag reports whether s is usable as a json tag name: non-empty and
+// consisting only of letters, digits, and the listed punctuation.
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		case !unicode.IsLetter(c) && !unicode.IsDigit(c):
+			return false
+		}
+	}
+	return true
+}
+
+// A goField represents a single Go field found in a struct, together with
+// the precomputed matching metadata built by typeFields.
+type goField struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	nameNonEsc  string // `"` + name + `":`
+	nameEscHTML string // `"` + HTMLEscape(name) + `":`
+
+	tag       bool  // name came from a json tag, not the Go field name
+	index     []int // index sequence for reflect.Value.Field
+	typ       reflect.Type
+	omitEmpty bool
+}
+
+// byIndex sorts goField by index sequence (lexicographic order over the
+// embedding paths, shorter paths first on ties).
+type byIndex []goField
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) structFields {
+	// Anonymous fields to explore at the current level and the next.
+	current := []goField{}
+	next := []goField{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	var count, nextCount map[reflect.Type]int
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []goField
+
+	// Buffer to run HTMLEscape on field names.
+	var nameEscBuf bytes.Buffer
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				isUnexported := sf.PkgPath != ""
+				if sf.Anonymous {
+					t := sf.Type
+					if t.Kind() == reflect.Ptr {
+						t = t.Elem()
+					}
+					if isUnexported && t.Kind() != reflect.Struct {
+						// Ignore embedded fields of unexported non-struct types.
+						continue
+					}
+					// Do not ignore embedded fields of unexported struct types
+					// since they may have exported fields.
+				} else if isUnexported {
+					// Ignore unexported non-embedded fields.
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				// index is the embedding path from t down to this field.
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					field := goField{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+					}
+					field.nameBytes = []byte(field.name)
+					field.equalFold = foldFunc(field.nameBytes)
+
+					// Build nameEscHTML and nameNonEsc ahead of time.
+					nameEscBuf.Reset()
+					nameEscBuf.WriteString(`"`)
+					json.HTMLEscape(&nameEscBuf, field.nameBytes)
+					nameEscBuf.WriteString(`":`)
+					field.nameEscHTML = nameEscBuf.String()
+					field.nameNonEsc = `"` + field.name + `":`
+
+					fields = append(fields, field)
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, goField{name: ft.Name(), index: index, typ: ft})
+				}
+			}
+		}
+	}
+
+	sort.Slice(fields, func(i, j int) bool {
+		x := fields
+		// sort field by name, breaking ties with depth, then
+		// breaking ties with "name came from json tag", then
+		// breaking ties with index sequence.
+		if x[i].name != x[j].name {
+			return x[i].name < x[j].name
+		}
+		if len(x[i].index) != len(x[j].index) {
+			return len(x[i].index) < len(x[j].index)
+		}
+		if x[i].tag != x[j].tag {
+			return x[i].tag
+		}
+		return byIndex(x).Less(i, j)
+	})
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	nameIndex := make(map[string]int, len(fields))
+	for i, field := range fields {
+		nameIndex[field.name] = i
+	}
+	return structFields{fields, nameIndex}
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []goField) (goField, bool) {
+	// The fields are sorted in increasing index-length order, then by presence of tag.
+	// That means that the first field is the dominant one. We need only check
+	// for error cases: two fields at top level, either both tagged or neither tagged.
+	if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
+		return goField{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache sync.Map // map[reflect.Type]structFields
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+// Concurrent first calls may both compute the result; LoadOrStore keeps one.
+func cachedTypeFields(t reflect.Type) structFields {
+	if f, ok := fieldCache.Load(t); ok {
+		return f.(structFields)
+	}
+	f, _ := fieldCache.LoadOrStore(t, typeFields(t))
+	return f.(structFields)
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+// E.g. for `json:"name,omitempty"` the tagOptions is "omitempty".
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options. E.g. parseTag("a,b,c") returns ("a", "b,c").
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		// next is "" on the last segment, terminating the loop.
+		s = next
+	}
+	return false
+}
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//   - S maps to s and to U+017F 'ſ' Latin small letter long s
+//   - k maps to K and to U+212A 'K' Kelvin sign
+//
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'    // U+212A KELVIN SIGN; folds with 'k'/'K'.
+	smallLongEss = '\u017f'    // U+017F LATIN SMALL LETTER LONG S; folds with 's'/'S'.
+)
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			// ASCII vs ASCII: compare case-insensitively for letters,
+			// exactly otherwise.
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	// Equal only if t was fully consumed.
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		// Letters compare case-insensitively; everything else exactly.
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		// Masking bit 0x20 equates upper and lower case for letters.
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// If it encounters an Unmarshaler, indirect stops and returns that.
+// If decodingNull is true, indirect stops at the first settable pointer so it
+// can be set to nil.
+//
+// Returns exactly one of: a json.Unmarshaler, an encoding.TextUnmarshaler,
+// or the fully dereferenced value (the other results are zero).
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// Issue #24153 indicates that it is generally not a guaranteed property
+	// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
+	// and expect the value to still be settable for values derived from
+	// unexported embedded struct fields.
+	//
+	// The logic below effectively does this when it first addresses the value
+	// (to satisfy possible pointer methods) and continues to dereference
+	// subsequent pointers as necessary.
+	//
+	// After the first round-trip, we set v back to the original value to
+	// preserve the original RW flags contained in reflect.Value.
+	v0 := v
+	haveAddr := false
+
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		haveAddr = true
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				haveAddr = false
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if decodingNull && v.CanSet() {
+			break
+		}
+
+		// Prevent infinite loop if v is an interface pointing to its own address:
+		//     var v interface{}
+		//     v = &v
+		if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
+			v = v.Elem()
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 && v.CanInterface() {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if !decodingNull {
+				if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+					return nil, u, reflect.Value{}
+				}
+			}
+		}
+
+		if haveAddr {
+			v = v0 // restore original value after round-trip Value.Addr().Elem()
+			haveAddr = false
+		} else {
+			v = v.Elem()
+		}
+	}
+	return nil, nil, v
+}
diff --git a/vendor/cuelang.org/go/cue/errors.go b/vendor/cuelang.org/go/cue/errors.go
new file mode 100644
index 0000000000..d079b970ac
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/errors.go
@@ -0,0 +1,134 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/runtime"
+)
+
+// toErr converts the bottom value b into an errors.Error associated with v.
+// When b wraps multiple errors, each is returned as its own valueError so
+// callers see every underlying failure.
+func (v Value) toErr(b *adt.Bottom) (err errors.Error) {
+	errs := errors.Errors(b.Err)
+	if len(errs) > 1 {
+		for _, e := range errs {
+			bb := *b
+			bb.Err = e
+			err = errors.Append(err, &valueError{v: v, err: &bb})
+		}
+		return err
+	}
+	return &valueError{v: v, err: b}
+}
+
+// Compile-time check that valueError implements errors.Error.
+var _ errors.Error = &valueError{}
+
+// A valueError is returned as a result of evaluating a value.
+// It pairs the evaluated Value with the bottom (error) it produced.
+type valueError struct {
+	v   Value
+	err *adt.Bottom
+}
+
+// Unwrap returns the error wrapped by the bottom, if any.
+func (e *valueError) Unwrap() error {
+	if e.err.Err == nil {
+		return nil
+	}
+	return errors.Unwrap(e.err.Err)
+}
+
+// Bottom exposes the underlying *adt.Bottom.
+func (e *valueError) Bottom() *adt.Bottom { return e.err }
+
+func (e *valueError) Error() string {
+	return errors.String(e)
+}
+
+// Position returns the position of the wrapped error, falling back to the
+// bottom's source position, or token.NoPos when neither is available.
+func (e *valueError) Position() token.Pos {
+	if e.err.Err != nil {
+		return e.err.Err.Position()
+	}
+	src := e.err.Source()
+	if src == nil {
+		return token.NoPos
+	}
+	return src.Pos()
+}
+
+func (e *valueError) InputPositions() []token.Pos {
+	if e.err.Err == nil {
+		return nil
+	}
+	return e.err.Err.InputPositions()
+}
+
+func (e *valueError) Msg() (string, []interface{}) {
+	if e.err.Err == nil {
+		return "", nil
+	}
+	return e.err.Err.Msg()
+}
+
+// Path returns the wrapped error's path, falling back to the path of the
+// value that produced the error.
+func (e *valueError) Path() (a []string) {
+	if e.err.Err != nil {
+		a = e.err.Err.Path()
+		if a != nil {
+			return a
+		}
+	}
+	return pathToStrings(e.v.Path())
+}
+
+// errNotExists is the incomplete-error bottom reported for references to
+// undefined values.
+var errNotExists = &adt.Bottom{
+	Code:      adt.IncompleteError,
+	NotExists: true,
+	Err:       errors.Newf(token.NoPos, "undefined value"),
+}
+
+// mkErr builds a *adt.Bottom from a heterogeneous argument list: error
+// codes, existing bottoms (combined), errors.Error values, and a format
+// string (which consumes all remaining args and terminates processing).
+//
+// NOTE(review): the idx parameter is accepted but not used by this body.
+func mkErr(idx *runtime.Runtime, src adt.Node, args ...interface{}) *adt.Bottom {
+	var e *adt.Bottom
+	var code adt.ErrorCode = -1 // -1 means "no code specified yet"
+outer:
+	for i, a := range args {
+		switch x := a.(type) {
+		case adt.ErrorCode:
+			code = x
+		case *adt.Bottom:
+			e = adt.CombineErrors(nil, e, x)
+		case []*adt.Bottom:
+			for _, b := range x {
+				e = adt.CombineErrors(nil, e, b)
+			}
+		case errors.Error:
+			e = adt.CombineErrors(nil, e, &adt.Bottom{Err: x})
+		case adt.Expr:
+		case string:
+			args := args[i+1:]
+			// Do not expand message so that errors can be localized.
+			pos := pos(src)
+			if code < 0 {
+				code = 0
+			}
+			e = adt.CombineErrors(nil, e, &adt.Bottom{
+				Code: code,
+				Err:  errors.Newf(pos, x, args...),
+			})
+			break outer
+		}
+	}
+	if code >= 0 {
+		e.Code = code
+	}
+	return e
+}
diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go
new file mode 100644
index 0000000000..af5038c22b
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/errors/errors.go
@@ -0,0 +1,651 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package errors defines shared types for handling CUE errors.
+//
+// The pivotal error type in CUE packages is the interface type Error.
+// The information available in such errors can be most easily retrieved using
+// the Path, Positions, and Print functions.
+package errors // import "cuelang.org/go/cue/errors"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/mpvl/unique"
+
+ "cuelang.org/go/cue/token"
+)
+
+// New is a convenience wrapper for errors.New in the standard library.
+// It does not return a CUE error.
+func New(msg string) error {
+	return errors.New(msg)
+}
+
+// Unwrap returns the result of calling the Unwrap method on err, if err
+// implements Unwrap. Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+	return errors.Unwrap(err)
+}
+
+// Is reports whether any error in err's chain matches target.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool {
+	return errors.Is(err, target)
+}
+
+// As finds the first error in err's chain that matches the type to which target
+// points, and if so, sets the target to its value and returns true. An error
+// matches a type if it is assignable to the target type, or if it has a method
+// As(interface{}) bool such that As(target) returns true. As will panic if
+// target is not a non-nil pointer to a type which implements error or is of
+// interface type.
+//
+// The As method should set the target to its value and return true if err
+// matches the type to which target points.
+func As(err error, target interface{}) bool {
+	return errors.As(err, target)
+}
+
+// A Message implements the error interface as well as Message to allow
+// internationalized messages. A Message is typically used as an embedding
+// in a CUE message.
+type Message struct {
+	format string
+	args   []interface{}
+}
+
+// NewMessage creates an error message for human consumption. The arguments
+// are for later consumption, allowing the message to be localized at a later
+// time. The passed argument list should not be modified.
+func NewMessage(format string, args []interface{}) Message {
+	return Message{format: format, args: args}
+}
+
+// Msg returns a printf-style format string and its arguments for human
+// consumption.
+func (m *Message) Msg() (format string, args []interface{}) {
+	return m.format, m.args
+}
+
+// Error implements the error interface by expanding the format string with
+// its stored arguments.
+func (m *Message) Error() string {
+	return fmt.Sprintf(m.format, m.args...)
+}
+
+// Error is the common interface implemented by all CUE errors. It extends
+// the standard error interface with position, path, and localizable-message
+// accessors.
+type Error interface {
+	// Position returns the primary position of an error. If multiple positions
+	// contribute equally, this reflects one of them.
+	Position() token.Pos
+
+	// InputPositions reports positions that contributed to an error, including
+	// the expressions resulting in the conflict, as well as values that were
+	// the input to this expression.
+	InputPositions() []token.Pos
+
+	// Error reports the error message without position information.
+	Error() string
+
+	// Path returns the path into the data tree where the error occurred.
+	// This path may be nil if the error is not associated with such a location.
+	Path() []string
+
+	// Msg returns the unformatted error message and its arguments for human
+	// consumption.
+	Msg() (format string, args []interface{})
+}
+
+// Positions returns all positions returned by an error, sorted
+// by relevance when possible and with duplicates removed.
+// The primary position, when valid, is kept first and excluded from
+// the sort; the remaining input positions are sorted and deduplicated.
+func Positions(err error) []token.Pos {
+	e := Error(nil)
+	if !errors.As(err, &e) {
+		return nil
+	}
+
+	a := make([]token.Pos, 0, 3)
+
+	sortOffset := 0
+	pos := e.Position()
+	if pos.IsValid() {
+		a = append(a, pos)
+		sortOffset = 1
+	}
+
+	for _, p := range e.InputPositions() {
+		if p.IsValid() && p != pos {
+			a = append(a, p)
+		}
+	}
+
+	// Sort everything after the primary position and move duplicates to
+	// the back; k is the count of unique sorted entries kept.
+	byPos := byPos(a[sortOffset:])
+	sort.Sort(byPos)
+	k := unique.ToFront(byPos)
+	return a[:k+sortOffset]
+}
+
+// byPos sorts positions by filename, line, then column; it also implements
+// unique.Interface via Truncate for deduplication.
+type byPos []token.Pos
+
+func (s *byPos) Truncate(n int)    { (*s) = (*s)[:n] }
+func (s byPos) Len() int           { return len(s) }
+func (s byPos) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s byPos) Less(i, j int) bool { return comparePos(s[i], s[j]) == -1 }
+
+// Path returns the path of an Error if err is of that type.
+func Path(err error) []string {
+	if e := Error(nil); errors.As(err, &e) {
+		return e.Path()
+	}
+	return nil
+}
+
+// Newf creates an Error with the associated position and message.
+func Newf(p token.Pos, format string, args ...interface{}) Error {
+	return &posError{
+		pos:     p,
+		Message: NewMessage(format, args),
+	}
+}
+
+// Wrapf creates an Error with the associated position and message. The provided
+// error is added for inspection context.
+func Wrapf(err error, p token.Pos, format string, args ...interface{}) Error {
+	pErr := &posError{
+		pos:     p,
+		Message: NewMessage(format, args),
+	}
+	return Wrap(pErr, err)
+}
+
+// Wrap creates a new error where child is a subordinate error of parent.
+// If child is a list of Errors, the result will itself be a list of errors
+// where each element wraps parent around one child.
+func Wrap(parent Error, child error) Error {
+	if child == nil {
+		return parent
+	}
+	a, ok := child.(list)
+	if !ok {
+		return &wrapped{parent, child}
+	}
+	b := make(list, len(a))
+	for i, err := range a {
+		b[i] = &wrapped{parent, err}
+	}
+	return b
+}
+
+// wrapped pairs a primary CUE error (main) with a subordinate cause (wrap).
+// Most accessors delegate to main and fall back to wrap.
+type wrapped struct {
+	main Error
+	wrap error
+}
+
+// Error implements the error interface, joining the main and wrapped
+// messages with ": " and omitting whichever side is empty.
+func (e *wrapped) Error() string {
+	switch msg := e.main.Error(); {
+	case e.wrap == nil:
+		return msg
+	case msg == "":
+		return e.wrap.Error()
+	default:
+		return fmt.Sprintf("%s: %s", msg, e.wrap)
+	}
+}
+
+// Is matches against the main error's chain (the wrap side is reachable
+// separately via Unwrap).
+func (e *wrapped) Is(target error) bool {
+	return Is(e.main, target)
+}
+
+// As matches against the main error's chain.
+func (e *wrapped) As(target interface{}) bool {
+	return As(e.main, target)
+}
+
+func (e *wrapped) Msg() (format string, args []interface{}) {
+	return e.main.Msg()
+}
+
+// Path prefers the main error's path, falling back to the wrapped error's.
+func (e *wrapped) Path() []string {
+	if p := Path(e.main); p != nil {
+		return p
+	}
+	return Path(e.wrap)
+}
+
+// InputPositions merges contributing positions from both sides.
+func (e *wrapped) InputPositions() []token.Pos {
+	return append(e.main.InputPositions(), Positions(e.wrap)...)
+}
+
+// Position prefers the main error's position, then the wrapped error's when
+// it is itself an Error.
+func (e *wrapped) Position() token.Pos {
+	if p := e.main.Position(); p != token.NoPos {
+		return p
+	}
+	if wrap, ok := e.wrap.(Error); ok {
+		return wrap.Position()
+	}
+	return token.NoPos
+}
+
+func (e *wrapped) Unwrap() error { return e.wrap }
+
+// Cause supports the pre-Go-1.13 causer convention (e.g. pkg/errors).
+func (e *wrapped) Cause() error { return e.wrap }
+
+// Promote converts a regular Go error to an Error if it isn't already one,
+// using msg as the wrapping message and no position.
+func Promote(err error, msg string) Error {
+	switch x := err.(type) {
+	case Error:
+		return x
+	default:
+		return Wrapf(err, token.NoPos, msg)
+	}
+}
+
+// Compile-time check that *posError satisfies Error.
+var _ Error = &posError{}
+
+// In a List, an error is represented by a *posError.
+// The position pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by the embedded Message.
+type posError struct {
+	pos    token.Pos
+	inputs []token.Pos
+	Message
+}
+
+func (e *posError) Path() []string              { return nil }
+func (e *posError) InputPositions() []token.Pos { return e.inputs }
+func (e *posError) Position() token.Pos         { return e.pos }
+
+// Append combines two errors, flattening lists as necessary and preserving
+// the order of errors. A nil first argument returns b unchanged.
+func Append(a, b Error) Error {
+	switch x := a.(type) {
+	case nil:
+		return b
+	case list:
+		return appendToList(x, b)
+	}
+	// Preserve order of errors.
+	list := appendToList(nil, a)
+	list = appendToList(list, b)
+	return list
+}
+
+// Errors reports the individual errors associated with an error, which is
+// the error itself if there is only one or, if the underlying type is list,
+// its individual elements. If the given error is not an Error, it will be
+// promoted to one.
+func Errors(err error) []Error {
+	switch x := err.(type) {
+	case nil:
+		return nil
+	case list:
+		return []Error(x)
+	case Error:
+		return []Error{x}
+	default:
+		return []Error{Promote(err, "")}
+	}
+}
+
+// appendToList appends err to a, splicing in its elements when err is itself
+// a list; a nil err leaves a unchanged.
+func appendToList(a list, err Error) list {
+	switch x := err.(type) {
+	case nil:
+		return a
+	case list:
+		if a == nil {
+			return x
+		}
+		return append(a, x...)
+	default:
+		return append(a, err)
+	}
+}
+
+// list is a list of Errors.
+// The zero value for a list is an empty list ready to use.
+type list []Error
+
+// Is reports whether any element of the list matches target.
+// NOTE(review): the err parameter is unused; presumably the signature mirrors
+// a multi-error matching convention — confirm against callers.
+func (p list) Is(err, target error) bool {
+	for _, e := range p {
+		if errors.Is(e, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// As reports whether any element of the list matches the type of target,
+// setting target on the first match. The err parameter is unused (see Is).
+func (p list) As(err error, target interface{}) bool {
+	for _, e := range p {
+		if errors.As(e, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// AddNewf adds an Error with given position and error message to a List.
+func (p *list) AddNewf(pos token.Pos, msg string, args ...interface{}) {
+	err := &posError{pos: pos, Message: Message{format: msg, args: args}}
+	*p = append(*p, err)
+}
+
+// Add adds an Error with given position and error message to a List.
+func (p *list) Add(err Error) {
+	*p = appendToList(*p, err)
+}
+
+// Reset resets a List to no errors.
+func (p *list) Reset() { *p = (*p)[:0] }
+
+// list implements sort.Interface.
+func (p list) Len() int      { return len(p) }
+func (p list) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// Less orders by position, then by path, then by message text.
+func (p list) Less(i, j int) bool {
+	if c := comparePos(p[i].Position(), p[j].Position()); c != 0 {
+		return c == -1
+	}
+	// Note that it is not sufficient to simply compare file offsets because
+	// the offsets do not reflect modified line information (through //line
+	// comments).
+
+	if !equalPath(p[i].Path(), p[j].Path()) {
+		return lessPath(p[i].Path(), p[j].Path())
+	}
+	return p[i].Error() < p[j].Error()
+}
+
+// lessOrMore maps a boolean comparison to -1 (less) or 1 (more).
+func lessOrMore(isLess bool) int {
+	if isLess {
+		return -1
+	}
+	return 1
+}
+
+// comparePos orders positions lexicographically by filename, line, and
+// column, returning -1, 0, or 1.
+func comparePos(a, b token.Pos) int {
+	if a.Filename() != b.Filename() {
+		return lessOrMore(a.Filename() < b.Filename())
+	}
+	if a.Line() != b.Line() {
+		return lessOrMore(a.Line() < b.Line())
+	}
+	if a.Column() != b.Column() {
+		return lessOrMore(a.Column() < b.Column())
+	}
+	return 0
+}
+
+// lessPath compares two paths element-wise, with a shorter prefix ordering
+// before its extension.
+func lessPath(a, b []string) bool {
+	for i, x := range a {
+		if i >= len(b) {
+			return false
+		}
+		if x != b[i] {
+			return x < b[i]
+		}
+	}
+	return len(a) < len(b)
+}
+
+// equalPath reports whether two paths have identical elements.
+func equalPath(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, x := range a {
+		if x != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Sanitize sorts multiple errors and removes duplicates on a best effort basis.
+// If err represents a single or no error, it returns the error as is.
+// The input list is not mutated; a copy is sorted and deduplicated.
+func Sanitize(err Error) Error {
+	if l, ok := err.(list); ok && err != nil {
+		a := make(list, len(l))
+		copy(a, l)
+		a.Sort()
+		a.RemoveMultiples()
+		return a
+	}
+	return err
+}
+
+// Sort sorts a List. *posError entries are sorted by position,
+// other errors are sorted by error message, and before any *posError
+// entry.
+//
+func (p list) Sort() {
+	sort.Sort(p)
+}
+
+// RemoveMultiples sorts a List and removes all but the first error per line,
+// compacting the list in place.
+func (p *list) RemoveMultiples() {
+	p.Sort()
+	var last Error
+	i := 0
+	for _, e := range *p {
+		if last == nil || !approximateEqual(last, e) {
+			last = e
+			(*p)[i] = e
+			i++
+		}
+	}
+	(*p) = (*p)[0:i]
+}
+
+// approximateEqual treats errors as duplicates when they share a filename,
+// line, and path; positionless errors are compared by message text instead.
+func approximateEqual(a, b Error) bool {
+	aPos := a.Position()
+	bPos := b.Position()
+	if aPos == token.NoPos || bPos == token.NoPos {
+		return a.Error() == b.Error()
+	}
+	return aPos.Filename() == bPos.Filename() &&
+		aPos.Line() == bPos.Line() &&
+		equalPath(a.Path(), b.Path())
+}
+
+// Error implements the error interface for a List by formatting its
+// aggregate message.
+func (p list) Error() string {
+	format, args := p.Msg()
+	return fmt.Sprintf(format, args...)
+}
+
+// Msg reports the unformatted error message for the first error, if any;
+// multiple errors are summarized with a count of the remainder.
+func (p list) Msg() (format string, args []interface{}) {
+	switch len(p) {
+	case 0:
+		return "no errors", nil
+	case 1:
+		return p[0].Msg()
+	}
+	return "%s (and %d more errors)", []interface{}{p[0], len(p) - 1}
+}
+
+// Position reports the primary position for the first error, if any.
+func (p list) Position() token.Pos {
+	if len(p) == 0 {
+		return token.NoPos
+	}
+	return p[0].Position()
+}
+
+// InputPositions reports the input positions for the first error, if any.
+func (p list) InputPositions() []token.Pos {
+	if len(p) == 0 {
+		return nil
+	}
+	return p[0].InputPositions()
+}
+
+// Path reports the path location of the first error, if any.
+func (p list) Path() []string {
+	if len(p) == 0 {
+		return nil
+	}
+	return p[0].Path()
+}
+
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p list) Err() error {
+	if len(p) == 0 {
+		return nil
+	}
+	return p
+}
+
+// A Config defines parameters for printing.
+type Config struct {
+	// Format formats the given string and arguments and writes it to w.
+	// It is used for all printing.
+	Format func(w io.Writer, format string, args ...interface{})
+
+	// Cwd is the current working directory. Filename positions are taken
+	// relative to this path.
+	Cwd string
+
+	// ToSlash sets whether to use Unix paths. Mostly used for testing.
+	ToSlash bool
+}
+
+// Print is a utility function that prints a list of errors to w,
+// one error per line, if the err parameter is a List. Otherwise
+// it prints the err string. A nil cfg is treated as the zero Config.
+//
+func Print(w io.Writer, err error, cfg *Config) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	// Sort and deduplicate before printing when err is a CUE error.
+	if e, ok := err.(Error); ok {
+		err = Sanitize(e)
+	}
+	for _, e := range Errors(err) {
+		printError(w, e, cfg)
+	}
+}
+
+// Details is a convenience wrapper for Print to return the error text as a
+// string.
+func Details(err error, cfg *Config) string {
+	w := &bytes.Buffer{}
+	Print(w, err, cfg)
+	return w.String()
+}
+
+// String generates a short message from a given Error.
+func String(err Error) string {
+	w := &strings.Builder{}
+	writeErr(w, err)
+	return w.String()
+}
+
+// writeErr writes err's path prefix (dot-joined) followed by the chain of
+// messages separated by ": ". It walks the Unwrap chain, printing each
+// Error's message; a non-Error tail is printed with fmt and ends the walk.
+func writeErr(w io.Writer, err Error) {
+	if path := strings.Join(err.Path(), "."); path != "" {
+		_, _ = io.WriteString(w, path)
+		_, _ = io.WriteString(w, ": ")
+	}
+
+	for {
+		u := errors.Unwrap(err)
+
+		printed := false
+		msg, args := err.Msg()
+		if msg != "" || u == nil { // print at least something
+			fmt.Fprintf(w, msg, args...)
+			printed = true
+		}
+
+		if u == nil {
+			break
+		}
+
+		if printed {
+			_, _ = io.WriteString(w, ": ")
+		}
+		// Continue down the chain only while the unwrapped error is a CUE
+		// Error; otherwise print it verbatim and stop.
+		err, _ = u.(Error)
+		if err == nil {
+			fmt.Fprint(w, u)
+			break
+		}
+	}
+}
+
+// defaultFprintf is the fallback Config.Format implementation.
+func defaultFprintf(w io.Writer, format string, args ...interface{}) {
+	fmt.Fprintf(w, format, args...)
+}
+
+// printError writes one error followed by its relevant positions, one per
+// indented line. Filenames are made relative to cfg.Cwd when set, and
+// prefixed with "./" so IDEs recognize them as local paths.
+func printError(w io.Writer, err error, cfg *Config) {
+	if err == nil {
+		return
+	}
+	fprintf := cfg.Format
+	if fprintf == nil {
+		fprintf = defaultFprintf
+	}
+
+	positions := []string{}
+	for _, p := range Positions(err) {
+		pos := p.Position()
+		s := pos.Filename
+		if cfg.Cwd != "" {
+			if p, err := filepath.Rel(cfg.Cwd, s); err == nil {
+				s = p
+				// Some IDEs (e.g. VSCode) only recognize a path if it start
+				// with a dot. This also helps to distinguish between local
+				// files and builtin packages.
+				if !strings.HasPrefix(s, ".") {
+					s = fmt.Sprintf(".%s%s", string(filepath.Separator), s)
+				}
+			}
+		}
+		if cfg.ToSlash {
+			s = filepath.ToSlash(s)
+		}
+		if pos.IsValid() {
+			if s != "" {
+				s += ":"
+			}
+			s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+		}
+		if s == "" {
+			s = "-"
+		}
+		positions = append(positions, s)
+	}
+
+	if e, ok := err.(Error); ok {
+		writeErr(w, e)
+	} else {
+		fprintf(w, "%v", err)
+	}
+
+	if len(positions) == 0 {
+		fprintf(w, "\n")
+		return
+	}
+
+	fprintf(w, ":\n")
+	for _, pos := range positions {
+		fprintf(w, "    %s\n", pos)
+	}
+}
diff --git a/vendor/cuelang.org/go/cue/format.go b/vendor/cuelang.org/go/cue/format.go
new file mode 100644
index 0000000000..707a7990ef
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format.go
@@ -0,0 +1,201 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/format"
+ "cuelang.org/go/internal/core/export"
+)
+
+// TODO:
+// * allow '-' to strip outer curly braces?
+//   - simplify output; can be used in combination with other flags
+// * advertise:
+//     c   like v, but print comments
+//     a   like c, but print attributes and package-local hidden fields as well
+
+// Format prints a CUE value.
+//
+// WARNING:
+//     although we are narrowing down the semantics, the verbs and options
+//     are still subject to change. this API is experimental although it is
+//     likely getting close to the final design.
+//
+// It recognizes the following verbs:
+//
+//     v    print CUE value
+//
+// The verbs support the following flags:
+//     #    print as schema and include definitions.
+//          The result is printed as a self-contained file, instead of an the
+//          expression format.
+//     +    evaluate: resolve defaults and error on incomplete errors
+//
+// Indentation can be controlled as follows:
+//     width      indent the cue block by tab stops (e.g. %2v)
+//     precision  convert tabs to spaces (e.g. %.2v), where
+//                a value of 0 means no indentation or newlines (TODO).
+//
+// If the value kind corresponds to one of the following Go types, the
+// usual Go formatting verbs for that type can be used:
+//
+//     Int:          b,d,o,O,q,x,X
+//     Float:        f,e,E,g,G
+//     String/Bytes: s,q,x,X
+//
+// The %v directive will be used if the type is not supported for that verb.
+//
+func (v Value) Format(state fmt.State, verb rune) {
+	// A zero Value prints as nothing.
+	if v.v == nil {
+		fmt.Fprint(state, "<nil>")
+		return
+	}
+
+	switch verb {
+	case 'a':
+		formatCUE(state, v, true, true)
+	case 'c':
+		formatCUE(state, v, true, false)
+	case 'v':
+		formatCUE(state, v, false, false)
+
+	case 'd', 'o', 'O', 'U':
+		// Integer verbs: delegate to big.Int when the value is an int;
+		// otherwise fall back to CUE formatting.
+		var i big.Int
+		if _, err := v.Int(&i); err != nil {
+			formatCUE(state, v, false, false)
+			return
+		}
+		i.Format(state, verb)
+
+	case 'f', 'e', 'E', 'g', 'G':
+		// Float verbs: delegate to the decimal representation.
+		d, err := v.Decimal()
+		if err != nil {
+			formatCUE(state, v, false, false)
+			return
+		}
+		d.Format(state, verb)
+
+	case 's', 'q':
+		// TODO: this drops other formatting directives
+		msg := "%s"
+		if verb == 'q' {
+			msg = "%q"
+		}
+
+		if b, err := v.Bytes(); err == nil {
+			fmt.Fprintf(state, msg, b)
+		} else {
+			s := fmt.Sprintf("%+v", v)
+			fmt.Fprintf(state, msg, s)
+		}
+
+	case 'x', 'X':
+		// Hex verbs depend on the value kind.
+		switch v.Kind() {
+		case StringKind, BytesKind:
+			b, _ := v.Bytes()
+			// TODO: this drops other formatting directives
+			msg := "%x"
+			if verb == 'X' {
+				msg = "%X"
+			}
+			fmt.Fprintf(state, msg, b)
+
+		case IntKind, NumberKind:
+			var i big.Int
+			_, _ = v.Int(&i)
+			i.Format(state, verb)
+
+		case FloatKind:
+			dec, _ := v.Decimal()
+			dec.Format(state, verb)
+
+		default:
+			formatCUE(state, v, false, false)
+		}
+
+	default:
+		formatCUE(state, v, false, false)
+	}
+}
+
+// formatCUE exports v to an AST using an export profile chosen from the
+// fmt flags ('#' selects full schema/definition output, '+' final values)
+// and prints the result via formatExpr.
+func formatCUE(state fmt.State, v Value, showDocs, showAll bool) {
+
+	pkgPath := v.instance().ID()
+
+	p := *export.Simplified
+
+	isDef := false
+	switch {
+	case state.Flag('#'):
+		// '#' flag: print as a self-contained schema including optional
+		// fields, definitions, and hidden fields.
+		isDef = true
+		p = export.Profile{
+			ShowOptional:    true,
+			ShowDefinitions: true,
+			ShowHidden:      true,
+		}
+
+	case state.Flag('+'):
+		// '+' flag: resolve defaults (final values), then apply the same
+		// hidden-field setting as the default case via fallthrough.
+		p = *export.Final
+		fallthrough
+
+	default:
+		p.ShowHidden = showAll
+	}
+
+	p.ShowDocs = showDocs
+	p.ShowAttributes = showAll
+
+	var n ast.Node
+	if isDef {
+		n, _ = p.Def(v.idx, pkgPath, v.v)
+	} else {
+		n, _ = p.Value(v.idx, pkgPath, v.v)
+	}
+
+	formatExpr(state, n)
+}
+
+// formatExpr renders an AST node with format options derived from the fmt
+// state: '-' simplifies, width sets the indent prefix, and precision
+// converts tabs to that many spaces. Formatting errors are ignored and
+// surrounding newlines are trimmed.
+func formatExpr(state fmt.State, n ast.Node) {
+	opts := make([]format.Option, 0, 3)
+	if state.Flag('-') {
+		opts = append(opts, format.Simplify())
+	}
+	// TODO: handle verbs to allow formatting based on type:
+	if width, ok := state.Width(); ok {
+		opts = append(opts, format.IndentPrefix(width))
+	}
+	// TODO: consider this: should tabs or spaces be the default?
+	if tabwidth, ok := state.Precision(); ok {
+		// TODO: 0 means no newlines.
+		opts = append(opts,
+			format.UseSpaces(tabwidth),
+			format.TabIndent(false))
+	}
+	// TODO: consider this.
+	// else if state.Flag(' ') {
+	// 	opts = append(opts,
+	// 		format.UseSpaces(4),
+	// 		format.TabIndent(false))
+	// }
+
+	b, _ := format.Node(n, opts...)
+	b = bytes.Trim(b, "\n\r")
+	_, _ = state.Write(b)
+}
diff --git a/vendor/cuelang.org/go/cue/format/format.go b/vendor/cuelang.org/go/cue/format/format.go
new file mode 100644
index 0000000000..5e81eb3591
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format/format.go
@@ -0,0 +1,350 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package format implements standard formatting of CUE configurations.
+package format // import "cuelang.org/go/cue/format"
+
+// TODO: this package is in need of a rewrite. When doing so, the API should
+// allow for reformatting an AST, without actually writing bytes.
+//
+// In essence, formatting determines the relative spacing to tokens. It should
+// be possible to have an abstract implementation providing such information
+// that can be used to either format or update an AST in a single walk.
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "text/tabwriter"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+)
+
+// An Option sets behavior of the formatter by mutating its config.
+type Option func(c *config)
+
+// Simplify allows the formatter to simplify output, such as removing
+// unnecessary quotes.
+func Simplify() Option {
+	return func(c *config) { c.simplify = true }
+}
+
+// UseSpaces specifies that tabs should be converted to spaces and sets the
+// default tab width.
+func UseSpaces(tabwidth int) Option {
+	return func(c *config) {
+		c.UseSpaces = true
+		c.Tabwidth = tabwidth
+	}
+}
+
+// TabIndent specifies whether to use tabs for indentation independent of
+// UseSpaces.
+func TabIndent(indent bool) Option {
+	return func(c *config) { c.TabIndent = indent }
+}
+
+// IndentPrefix specifies the number of tabstops to use as a prefix for every
+// line.
+func IndentPrefix(n int) Option {
+	return func(c *config) { c.Indent = n }
+}
+
+// TODO: make public
+// sortImportsOption causes import declarations to be sorted.
+func sortImportsOption() Option {
+	return func(c *config) { c.sortImports = true }
+}
+
+// TODO: other options:
+//
+// const (
+// 	RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
+// 	TabIndent                  // use tabs for indentation independent of UseSpaces
+// 	UseSpaces                  // use spaces instead of tabs for alignment
+// 	SourcePos                  // emit //line comments to preserve original source positions
+// )
+
+// Node formats node in canonical cue fmt style and writes the result to dst.
+//
+// The node type must be *ast.File, []syntax.Decl, syntax.Expr, syntax.Decl, or
+// syntax.Spec. Node does not modify node. Imports are not sorted for nodes
+// representing partial source files (for instance, if the node is not an
+// *ast.File).
+//
+// The function may return early (before the entire result is written) and
+// return a formatting error, for instance due to an incorrect AST.
+//
+func Node(node ast.Node, opt ...Option) ([]byte, error) {
+	cfg := newConfig(opt)
+	return cfg.fprint(node)
+}
+
+// Source formats src in canonical cue fmt style and returns the result or an
+// (I/O or syntax) error. src is expected to be a syntactically correct CUE
+// source file, or a list of CUE declarations or statements.
+//
+// If src is a partial source file, the leading and trailing space of src is
+// applied to the result (such that it has the same leading and trailing space
+// as src), and the result is indented by the same amount as the first line of
+// src containing code. Imports are not sorted for partial source files.
+//
+// Caution: Tools relying on consistent formatting based on the installed
+// version of cue (for instance, such as for presubmit checks) should execute
+// that cue binary instead of calling Source.
+//
+func Source(b []byte, opt ...Option) ([]byte, error) {
+	cfg := newConfig(opt)
+
+	// Parse including comments so they survive reformatting.
+	f, err := parser.ParseFile("", b, parser.ParseComments)
+	if err != nil {
+		return nil, fmt.Errorf("parse: %s", err)
+	}
+
+	// print AST
+	return cfg.fprint(f)
+}
+
+// config holds the effective formatter settings assembled from Options.
+type config struct {
+	UseSpaces bool
+	TabIndent bool
+	Tabwidth  int // default: 4
+	Indent    int // default: 0 (all code is indented at least by this much)
+
+	simplify    bool
+	sortImports bool
+}
+
+// newConfig applies opt to a config pre-populated with the defaults
+// (Tabwidth 8, tab indentation, spaces for alignment).
+func newConfig(opt []Option) *config {
+	cfg := &config{
+		Tabwidth:  8,
+		TabIndent: true,
+		UseSpaces: true,
+	}
+	for _, o := range opt {
+		o(cfg)
+	}
+	return cfg
+}
+
+// fprint prints node to bytes: it runs the printer, pipes the result
+// through a tabwriter for column alignment, and optionally expands tabs
+// to spaces. On printer error the raw printer output is returned alongside
+// the error.
+func (cfg *config) fprint(node interface{}) (out []byte, err error) {
+	var p printer
+	p.init(cfg)
+	if err = printNode(node, &p); err != nil {
+		return p.output, err
+	}
+
+	padchar := byte('\t')
+	if cfg.UseSpaces {
+		padchar = byte(' ')
+	}
+
+	twmode := tabwriter.StripEscape | tabwriter.TabIndent | tabwriter.DiscardEmptyColumns
+	if cfg.TabIndent {
+		twmode |= tabwriter.TabIndent
+	}
+
+	buf := &bytes.Buffer{}
+	tw := tabwriter.NewWriter(buf, 0, cfg.Tabwidth, 1, padchar, twmode)
+
+	// write printer result via tabwriter/trimmer to output
+	if _, err = tw.Write(p.output); err != nil {
+		return
+	}
+
+	err = tw.Flush()
+	if err != nil {
+		return buf.Bytes(), err
+	}
+
+	b := buf.Bytes()
+	// Expand indentation tabs to spaces when tab indentation is disabled.
+	if !cfg.TabIndent {
+		b = bytes.ReplaceAll(b, []byte{'\t'}, bytes.Repeat([]byte{' '}, cfg.Tabwidth))
+	}
+	return b, nil
+}
+
+// A formatter walks a syntax.Node, interspersed with comments and spacing
+// directives, in the order that they would occur in printed form.
+type formatter struct {
+	*printer
+
+	stack    []frame // saved frames of enclosing nodes
+	current  frame   // state for the node currently being formatted
+	nestExpr int
+}
+
+// newFormatter returns a formatter writing through p, with newline
+// separators as the initial setting.
+func newFormatter(p *printer) *formatter {
+	f := &formatter{
+		printer: p,
+		current: frame{
+			settings: settings{
+				nodeSep:   newline,
+				parentSep: newline,
+			},
+		},
+	}
+	return f
+}
+
+// whiteSpace is a bit set of spacing directives interleaved with tokens.
+type whiteSpace int
+
+const (
+	ignore whiteSpace = 0
+
+	// write a space, or disallow it
+	blank whiteSpace = 1 << iota
+	vtab             // column marker
+	noblank
+
+	nooverride
+
+	comma      // print a comma, unless trailcomma overrides it
+	trailcomma // print a trailing comma unless closed on same line
+	declcomma  // write a comma when not at the end of line
+
+	newline    // write a line in a table
+	formfeed   // next line is not part of the table
+	newsection // add two newlines
+
+	indent   // request indent an extra level after the next newline
+	unindent // unindent a level after the next newline
+	indented // element was indented.
+)
+
+// frame captures per-node formatting state: pending comment groups, the
+// token position within the node, and the inherited settings.
+type frame struct {
+	cg  []*ast.CommentGroup
+	pos int8
+
+	settings
+}
+
+type settings struct {
+	// separator is blank if the current node spans a single line and newline
+	// otherwise.
+	nodeSep   whiteSpace
+	parentSep whiteSpace
+	override  whiteSpace
+}
+
+// suppress spurious linter warning: field is actually used.
+func init() {
+	s := settings{}
+	_ = s.override
+}
+
+// print forwards each element to the printer, advancing the in-node token
+// position for strings and tokens, then flushes comments due at the new
+// position.
+func (f *formatter) print(a ...interface{}) {
+	for _, x := range a {
+		f.Print(x)
+		switch x.(type) {
+		case string, token.Token: // , *syntax.BasicLit, *syntax.Ident:
+			f.current.pos++
+		}
+	}
+	f.visitComments(f.current.pos)
+}
+
+// formfeed returns blank when the current node is on a single line,
+// otherwise a formfeed (ending the current tabwriter table).
+func (f *formatter) formfeed() whiteSpace {
+	if f.current.nodeSep == blank {
+		return blank
+	}
+	return formfeed
+}
+
+// wsOverride returns def unless the current frame has an explicit override.
+func (f *formatter) wsOverride(def whiteSpace) whiteSpace {
+	if f.current.override == ignore {
+		return def
+	}
+	return f.current.override
+}
+
+// onOneLine reports whether node's start and end fall on the same source
+// line; it is conservative (false) when either position is invalid.
+func (f *formatter) onOneLine(node ast.Node) bool {
+	a := node.Pos()
+	b := node.End()
+	if a.IsValid() && b.IsValid() {
+		return f.lineFor(a) == f.lineFor(b)
+	}
+	// TODO: walk and look at relative positions to determine the same?
+	return false
+}
+
+// before pushes a new frame for node, inheriting the parent's settings.
+// Small single-line structs switch to blank separators. It returns false
+// for a nil node, in which case no frame state is set up for it.
+func (f *formatter) before(node ast.Node) bool {
+	f.stack = append(f.stack, f.current)
+	f.current = frame{settings: f.current.settings}
+	f.current.parentSep = f.current.nodeSep
+
+	if node != nil {
+		s, ok := node.(*ast.StructLit)
+		if ok && len(s.Elts) <= 1 && f.current.nodeSep != blank && f.onOneLine(node) {
+			f.current.nodeSep = blank
+		}
+		f.current.cg = node.Comments()
+		f.visitComments(f.current.pos)
+		return true
+	}
+	return false
+}
+
+// after flushes any remaining comments for the node (127 > any token
+// position), pops the frame, and advances the parent's position.
+func (f *formatter) after(node ast.Node) {
+	f.visitComments(127)
+	p := len(f.stack) - 1
+	f.current = f.stack[p]
+	f.stack = f.stack[:p]
+	f.current.pos++
+	f.visitComments(f.current.pos)
+}
+
+// visitComments prints all pending comment groups positioned at or before
+// until, separating consecutive groups by a new section.
+func (f *formatter) visitComments(until int8) {
+	c := &f.current
+
+	printed := false
+	for ; len(c.cg) > 0 && c.cg[0].Position <= until; c.cg = c.cg[1:] {
+		if printed {
+			f.Print(newsection)
+		}
+		printed = true
+		f.printComment(c.cg[0])
+	}
+}
+
+// printComment emits one comment group: doc comments go on their own line;
+// trailing line comments are aligned with a vtab, other comments get a
+// blank before them.
+func (f *formatter) printComment(cg *ast.CommentGroup) {
+	f.Print(cg)
+
+	printBlank := false
+	if cg.Doc && len(f.output) > 0 {
+		f.Print(newline)
+		printBlank = true
+	}
+	for _, c := range cg.List {
+		isEnd := strings.HasPrefix(c.Text, "//")
+		if !printBlank {
+			if isEnd {
+				f.Print(vtab)
+			} else {
+				f.Print(blank)
+			}
+		}
+		f.Print(c.Slash)
+		f.Print(c)
+		if isEnd {
+			f.Print(newline)
+			if cg.Doc {
+				f.Print(nooverride)
+			}
+		}
+	}
+}
diff --git a/vendor/cuelang.org/go/cue/format/import.go b/vendor/cuelang.org/go/cue/format/import.go
new file mode 100644
index 0000000000..873de2c7f6
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format/import.go
@@ -0,0 +1,167 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package format
+
+import (
+ "sort"
+ "strconv"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+// sortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data
+// loss.
+func sortImports(d *ast.ImportDecl) {
+	if !d.Lparen.IsValid() || len(d.Specs) == 0 {
+		// Not a block: sorted by default.
+		return
+	}
+
+	// Identify and sort runs of specs on successive lines.
+	i := 0
+	specs := d.Specs[:0]
+	for j, s := range d.Specs {
+		if j > i && (s.Pos().RelPos() >= token.NewSection || hasDoc(s)) {
+			setRelativePos(s, token.Newline)
+			// j begins a new run. End this one.
+			block := sortSpecs(d.Specs[i:j])
+			specs = append(specs, block...)
+			i = j
+		}
+	}
+	specs = append(specs, sortSpecs(d.Specs[i:])...)
+	setRelativePos(specs[0], token.Newline)
+	d.Specs = specs
+}
+
+// setRelativePos sets the relative position of s's first token (its name if
+// present, otherwise its path) to r. Specs that carry a doc comment are left
+// untouched so the comment keeps its attachment.
+func setRelativePos(s *ast.ImportSpec, r token.RelPos) {
+	if hasDoc(s) {
+		return
+	}
+	pos := s.Pos().WithRel(r)
+	if s.Name != nil {
+		s.Name.NamePos = pos
+	} else {
+		s.Path.ValuePos = pos
+	}
+}
+
+// hasDoc reports whether s carries a doc comment.
+func hasDoc(s *ast.ImportSpec) bool {
+	for _, doc := range s.Comments() {
+		if doc.Doc {
+			return true
+		}
+	}
+	return false
+}
+
+// importPath returns the unquoted import path of s, or "" if the path does
+// not unquote cleanly.
+func importPath(s *ast.ImportSpec) string {
+	t, err := strconv.Unquote(s.Path.Value)
+	if err == nil {
+		return t
+	}
+	return ""
+}
+
+// importName returns the local name given to the import, or "" if none.
+func importName(s *ast.ImportSpec) string {
+	n := s.Name
+	if n == nil {
+		return ""
+	}
+	return n.Name
+}
+
+// importComment returns the text of the line comment attached to s, if any.
+func importComment(s *ast.ImportSpec) string {
+	for _, c := range s.Comments() {
+		if c.Line {
+			return c.Text()
+		}
+	}
+	return ""
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+func collapse(prev, next *ast.ImportSpec) bool {
+	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+		return false
+	}
+	// Only drop prev if removing it loses no comments other than doc
+	// comments (which next is assumed to duplicate or supersede).
+	for _, c := range prev.Comments() {
+		if !c.Doc {
+			return false
+		}
+	}
+	return true
+}
+
+// posSpan records the original start and end positions of a spec.
+type posSpan struct {
+	Start token.Pos
+	End   token.Pos
+}
+
+// sortSpecs sorts the given run of import specs by path (then name, then
+// comment), removes safe duplicates, and marks the first spec as starting a
+// new section. It returns the possibly shortened slice.
+func sortSpecs(specs []*ast.ImportSpec) []*ast.ImportSpec {
+	// Can't short-circuit here even if specs are already sorted,
+	// since they might yet need deduplication.
+	// A lone import, however, may be safely ignored.
+	if len(specs) <= 1 {
+		setRelativePos(specs[0], token.NewSection)
+		return specs
+	}
+
+	// Record positions for specs.
+	pos := make([]posSpan, len(specs))
+	for i, s := range specs {
+		pos[i] = posSpan{s.Pos(), s.End()}
+	}
+
+	// Sort the import specs by import path.
+	// Remove duplicates, when possible without data loss.
+	// Reassign the import paths to have the same position sequence.
+	// Reassign each comment to abut the end of its spec.
+	// Sort the comments by new position.
+	sort.Sort(byImportSpec(specs))
+
+	// Dedup. Thanks to our sorting, we can just consider
+	// adjacent pairs of imports.
+	deduped := specs[:0]
+	for i, s := range specs {
+		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+			deduped = append(deduped, s)
+		}
+	}
+	specs = deduped
+
+	setRelativePos(specs[0], token.NewSection)
+	return specs
+}
+
+// byImportSpec orders import specs by path, then by local name, then by
+// attached line comment.
+type byImportSpec []*ast.ImportSpec
+
+func (x byImportSpec) Len() int      { return len(x) }
+func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportSpec) Less(i, j int) bool {
+	ipath := importPath(x[i])
+	jpath := importPath(x[j])
+	if ipath != jpath {
+		return ipath < jpath
+	}
+	iname := importName(x[i])
+	jname := importName(x[j])
+	if iname != jname {
+		return iname < jname
+	}
+	return importComment(x[i]) < importComment(x[j])
+}
diff --git a/vendor/cuelang.org/go/cue/format/node.go b/vendor/cuelang.org/go/cue/format/node.go
new file mode 100644
index 0000000000..6d06ae4123
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format/node.go
@@ -0,0 +1,916 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package format
+
+import (
+ "fmt"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/scanner"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+)
+
+// printNode formats a single supported node type (File, Expr, Decl, or
+// []Decl) into f. It returns an error for unsupported node types or any
+// errors accumulated while formatting.
+func printNode(node interface{}, f *printer) error {
+	s := newFormatter(f)
+
+	ls := labelSimplifier{scope: map[string]bool{}}
+
+	// format node
+	f.allowed = nooverride // gobble initial whitespace.
+	switch x := node.(type) {
+	case *ast.File:
+		if f.cfg.simplify {
+			ls.markReferences(x)
+		}
+		s.file(x)
+	case ast.Expr:
+		if f.cfg.simplify {
+			ls.markReferences(x)
+		}
+		s.expr(x)
+	case ast.Decl:
+		if f.cfg.simplify {
+			ls.markReferences(x)
+		}
+		s.decl(x)
+		// case ast.Node: // TODO: do we need this?
+		// 	s.walk(x)
+	case []ast.Decl:
+		if f.cfg.simplify {
+			ls.processDecls(x)
+		}
+		s.walkDeclList(x)
+	default:
+		goto unsupported
+	}
+
+	return s.errs
+
+unsupported:
+	return fmt.Errorf("cue/format: unsupported node type %T", node)
+}
+
+// isRegularField reports whether tok denotes a regular (":") field, as
+// opposed to a definition or other field token. token.ILLEGAL stands for an
+// unset token.
+func isRegularField(tok token.Token) bool {
+	return tok == token.ILLEGAL || tok == token.COLON
+}
+
+// Helper functions for common node lists. They may be empty.
+
+// nestDepth returns the nesting depth of single-element struct values
+// reachable from f: 1 for a plain field, +1 for each nested single-field
+// struct, and 0 when the value is a struct with zero or multiple elements.
+func nestDepth(f *ast.Field) int {
+	d := 1
+	if s, ok := f.Value.(*ast.StructLit); ok {
+		switch {
+		case len(s.Elts) != 1:
+			d = 0
+		default:
+			if f, ok := s.Elts[0].(*ast.Field); ok {
+				d += nestDepth(f)
+			}
+		}
+	}
+	return d
+}
+
+// hasDocComments reports whether d (or, for fields/aliases/let clauses, its
+// leading identifier or label) carries any comments.
+// TODO: be more accurate and move to astutil
+func hasDocComments(d ast.Decl) bool {
+	if len(d.Comments()) > 0 {
+		return true
+	}
+	switch x := d.(type) {
+	case *ast.Field:
+		return len(x.Label.Comments()) > 0
+	case *ast.Alias:
+		return len(x.Ident.Comments()) > 0
+	case *ast.LetClause:
+		return len(x.Ident.Comments()) > 0
+	}
+	return false
+}
+
+// walkDeclList formats a list of declarations, inserting comma separators
+// and the blank-line/section spacing implied by nesting depth and doc
+// comments. With simplify enabled, trailing "..." declarations are collected
+// and a single ellipsis is printed at the end.
+func (f *formatter) walkDeclList(list []ast.Decl) {
+	f.before(nil)
+	d := 0
+	hasEllipsis := false
+	for i, x := range list {
+		if i > 0 {
+			f.print(declcomma)
+			nd := 0
+			// NOTE: f here shadows the formatter receiver with the field.
+			if f, ok := x.(*ast.Field); ok {
+				nd = nestDepth(f)
+			}
+			if f.current.parentSep == newline && (d == 0 || nd != d) {
+				f.print(f.formfeed())
+			}
+			if hasDocComments(x) {
+				switch x := list[i-1].(type) {
+				case *ast.Field:
+					if x.Token == token.ISA || internal.IsDefinition(x.Label) {
+						f.print(newsection)
+					}
+
+				default:
+					f.print(newsection)
+				}
+			}
+		}
+		if f.printer.cfg.simplify && internal.IsEllipsis(x) {
+			hasEllipsis = true
+			continue
+		}
+		f.decl(x)
+		d = 0
+		if f, ok := x.(*ast.Field); ok {
+			d = nestDepth(f)
+		}
+		// Force a formfeed before a following struct or list field so that
+		// multi-line values start on their own line.
+		if j := i + 1; j < len(list) {
+			switch x := list[j].(type) {
+			case *ast.Field:
+				switch x := x.Value.(type) {
+				case *ast.StructLit:
+					// TODO: not entirely correct: could have multiple elements,
+					// not have a valid Lbrace, and be marked multiline. This
+					// cannot occur for ASTs resulting from a parse, though.
+					if x.Lbrace.IsValid() || len(x.Elts) != 1 {
+						f.print(f.formfeed())
+						continue
+					}
+				case *ast.ListLit:
+					f.print(f.formfeed())
+					continue
+				}
+			}
+		}
+		f.print(f.current.parentSep)
+	}
+	if hasEllipsis {
+		f.decl(&ast.Ellipsis{})
+		f.print(f.current.parentSep)
+	}
+	f.after(nil)
+}
+
+// walkSpecList formats a list of import specs.
+func (f *formatter) walkSpecList(list []*ast.ImportSpec) {
+	f.before(nil)
+	for _, x := range list {
+		f.before(x)
+		f.importSpec(x)
+		f.after(x)
+	}
+	f.after(nil)
+}
+
+// walkClauseList formats a list of comprehension clauses, printing ws
+// before each clause.
+func (f *formatter) walkClauseList(list []ast.Clause, ws whiteSpace) {
+	f.before(nil)
+	for _, x := range list {
+		f.before(x)
+		f.print(ws)
+		f.clause(x)
+		f.after(x)
+	}
+	f.after(nil)
+}
+
+// walkListElems formats the elements of a list literal, each followed by a
+// comma and a blank.
+func (f *formatter) walkListElems(list []ast.Expr) {
+	f.before(nil)
+	for _, x := range list {
+		f.before(x)
+		switch n := x.(type) {
+		case *ast.Comprehension:
+			f.walkClauseList(n.Clauses, blank)
+			f.print(blank, nooverride)
+			f.expr(n.Value)
+
+		case *ast.Ellipsis:
+			f.ellipsis(n)
+
+		case *ast.Alias:
+			f.expr(n.Ident)
+			f.print(n.Equal, token.BIND)
+			f.expr(n.Expr)
+
+			// TODO: ast.CommentGroup: allows comment groups in ListLits.
+
+		case ast.Expr:
+			f.exprRaw(n, token.LowestPrec, 1)
+		}
+		f.print(comma, blank)
+		f.after(x)
+	}
+	f.after(nil)
+}
+
+// walkArgsList formats a call's argument list at the given depth, each
+// argument followed by a comma and a blank.
+func (f *formatter) walkArgsList(list []ast.Expr, depth int) {
+	f.before(nil)
+	for _, x := range list {
+		f.before(x)
+		f.exprRaw(x, token.LowestPrec, depth)
+		f.print(comma, blank)
+		f.after(x)
+	}
+	f.after(nil)
+}
+
+// file formats an entire CUE file and terminates the output with EOF.
+func (f *formatter) file(file *ast.File) {
+	f.before(file)
+	f.walkDeclList(file.Decls)
+	f.after(file)
+	f.print(token.EOF)
+}
+
+// inlineField returns the single inner field of n's struct value when that
+// field may be printed inline ("a: b: v" style), or nil when inlining is not
+// possible without losing information (attributes, comments, explicit
+// braces).
+func (f *formatter) inlineField(n *ast.Field) *ast.Field {
+	regular := internal.IsRegularField(n)
+	// shortcut single-element structs.
+	// If the label has a valid position, we assume that an unspecified
+	// Lbrace signals the intend to collapse fields.
+	if !n.Label.Pos().IsValid() && !(f.printer.cfg.simplify && regular) {
+		return nil
+	}
+
+	obj, ok := n.Value.(*ast.StructLit)
+	if !ok || len(obj.Elts) != 1 ||
+		(obj.Lbrace.IsValid() && !f.printer.cfg.simplify) ||
+		(obj.Lbrace.IsValid() && hasDocComments(n)) ||
+		len(n.Attrs) > 0 {
+		return nil
+	}
+
+	mem, ok := obj.Elts[0].(*ast.Field)
+	if !ok || len(mem.Attrs) > 0 {
+		return nil
+	}
+
+	if hasDocComments(mem) {
+		// TODO: this inserts curly braces even in spaces where this
+		// may not be desirable, such as:
+		// a:
+		//   // foo
+		//   b: 3
+		return nil
+	}
+	return mem
+}
+
+// decl formats a single declaration: fields (with inline collapsing and
+// attribute handling), package and import declarations, let clauses,
+// embeddings, attributes, and comment groups.
+func (f *formatter) decl(decl ast.Decl) {
+	if decl == nil {
+		return
+	}
+	defer f.after(decl)
+	if !f.before(decl) {
+		return
+	}
+
+	switch n := decl.(type) {
+	case *ast.Field:
+		f.label(n.Label, n.Optional != token.NoPos)
+
+		regular := isRegularField(n.Token)
+		if regular {
+			f.print(noblank, nooverride, n.TokenPos, token.COLON)
+		} else {
+			f.print(blank, nooverride, n.Token)
+		}
+
+		// Print "a: b: v" on one line, or indented on the next line,
+		// when the value is a collapsible single-field struct.
+		if mem := f.inlineField(n); mem != nil {
+			switch {
+			default:
+				fallthrough
+
+			case regular && f.cfg.simplify:
+				f.print(blank, nooverride)
+				f.decl(mem)
+
+			case mem.Label.Pos().IsNewline():
+				f.print(indent, formfeed)
+				f.decl(mem)
+				f.indent--
+			}
+			return
+		}
+
+		nextFF := f.nextNeedsFormfeed(n.Value)
+		tab := vtab
+		if nextFF {
+			tab = blank
+		}
+
+		f.print(tab)
+
+		if n.Value != nil {
+			switch n.Value.(type) {
+			case *ast.ListLit, *ast.StructLit:
+				f.expr(n.Value)
+			default:
+				f.print(indent)
+				f.expr(n.Value)
+				f.markUnindentLine()
+			}
+		} else {
+			f.current.pos++
+			f.visitComments(f.current.pos)
+		}
+
+		// Attributes trail the value, vtab-aligned first, then blanks.
+		space := tab
+		for _, a := range n.Attrs {
+			if f.before(a) {
+				f.print(space, a.At, a)
+			}
+			f.after(a)
+			space = blank
+		}
+
+		if nextFF {
+			f.print(formfeed)
+		}
+
+	case *ast.BadDecl:
+		f.print(n.From, "*bad decl*", declcomma)
+
+	case *ast.Package:
+		f.print(n.PackagePos, "package")
+		f.print(blank, n.Name, newsection, nooverride)
+
+	case *ast.ImportDecl:
+		f.print(n.Import, "import")
+		if len(n.Specs) == 0 {
+			f.print(blank, n.Lparen, token.LPAREN, n.Rparen, token.RPAREN, newline)
+			break
+		}
+		switch {
+		case len(n.Specs) == 1 && len(n.Specs[0].Comments()) == 0:
+			// A single, comment-free import without parens stays on one line.
+			if !n.Lparen.IsValid() {
+				f.print(blank)
+				f.walkSpecList(n.Specs)
+				break
+			}
+			fallthrough
+		default:
+			f.print(blank, n.Lparen, token.LPAREN, newline, indent)
+			f.walkSpecList(n.Specs)
+			f.print(unindent, newline, n.Rparen, token.RPAREN, newline)
+		}
+		f.print(newsection, nooverride)
+
+	case *ast.LetClause:
+		if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline {
+			f.print(formfeed)
+		}
+		f.print(n.Let, token.LET, blank, nooverride)
+		f.expr(n.Ident)
+		f.print(blank, nooverride, n.Equal, token.BIND, blank)
+		f.expr(n.Expr)
+		f.print(declcomma) // implied
+
+	case *ast.EmbedDecl:
+		if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline {
+			f.print(formfeed)
+		}
+		f.expr(n.Expr)
+		f.print(newline, noblank)
+
+	case *ast.Attribute:
+		f.print(n.At, n)
+
+	case *ast.CommentGroup:
+		f.printComment(n)
+		f.print(newsection)
+
+	case ast.Expr:
+		f.embedding(n)
+	}
+}
+
+// embedding formats an expression used in declaration position
+// (comprehensions, ellipses, aliases, and plain embedded expressions).
+func (f *formatter) embedding(decl ast.Expr) {
+	switch n := decl.(type) {
+	case *ast.Comprehension:
+		if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline {
+			f.print(formfeed)
+		}
+		f.walkClauseList(n.Clauses, blank)
+		f.print(blank, nooverride)
+		f.expr(n.Value)
+
+	case *ast.Ellipsis:
+		f.ellipsis(n)
+
+	case *ast.Alias:
+		if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline {
+			f.print(formfeed)
+		}
+		f.expr(n.Ident)
+		f.print(blank, n.Equal, token.BIND, blank)
+		f.expr(n.Expr)
+		f.print(declcomma) // implied
+
+		// TODO: ast.CommentGroup: allows comment groups in ListLits.
+
+	case ast.Expr:
+		f.exprRaw(n, token.LowestPrec, 1)
+	}
+}
+
+// nextNeedsFormfeed reports whether the value n forces a formfeed after the
+// enclosing field: struct and list literals always do, basic literals only
+// when they contain a newline (multi-line strings).
+func (f *formatter) nextNeedsFormfeed(n ast.Expr) bool {
+	switch x := n.(type) {
+	case *ast.StructLit:
+		return true
+	case *ast.BasicLit:
+		return strings.IndexByte(x.Value, '\n') >= 0
+	case *ast.ListLit:
+		return true
+	}
+	return false
+}
+
+// importSpec formats a single import spec, printing the optional local name
+// before the quoted path.
+func (f *formatter) importSpec(x *ast.ImportSpec) {
+	if x.Name != nil {
+		f.label(x.Name, false)
+		f.print(blank)
+	} else {
+		f.current.pos++
+		f.visitComments(f.current.pos)
+	}
+	f.expr(x.Path)
+	f.print(newline)
+}
+
+// isValidIdent reports whether ident scans as a single identifier or
+// keyword token covering the entire string.
+func isValidIdent(ident string) bool {
+	var scan scanner.Scanner
+	scan.Init(token.NewFile("check", -1, len(ident)), []byte(ident), nil, 0)
+
+	_, tok, lit := scan.Scan()
+	if tok == token.IDENT || tok.IsKeyword() {
+		return lit == ident
+	}
+	return false
+}
+
+// label formats a field label of any supported kind, appending "?" when the
+// field is optional. It panics on unknown label types.
+func (f *formatter) label(l ast.Label, optional bool) {
+	f.before(l)
+	defer f.after(l)
+	switch n := l.(type) {
+	case *ast.Alias:
+		f.expr(n)
+
+	case *ast.Ident:
+		// Escape an identifier that has invalid characters. This may happen,
+		// if the AST is not generated by the parser.
+		name := n.Name
+		if !ast.IsValidIdent(name) {
+			name = literal.String.Quote(n.Name)
+		}
+		f.print(n.NamePos, name)
+
+	case *ast.BasicLit:
+		str := n.Value
+		// Allow any CUE string in the AST, but ensure it is formatted
+		// according to spec.
+		if strings.HasPrefix(str, `"""`) || strings.HasPrefix(str, "#") {
+			if u, err := literal.Unquote(str); err == nil {
+				str = literal.String.Quote(u)
+			}
+		}
+		f.print(n.ValuePos, str)
+
+	case *ast.ListLit:
+		f.expr(n)
+
+	case *ast.ParenExpr:
+		f.expr(n)
+
+	case *ast.Interpolation:
+		f.expr(n)
+
+	default:
+		panic(fmt.Sprintf("unknown label type %T", n))
+	}
+	if optional {
+		f.print(token.OPTION)
+	}
+}
+
+// ellipsis formats "..." with its optional type, omitting the type when it
+// is top ("_"), which is implied.
+func (f *formatter) ellipsis(x *ast.Ellipsis) {
+	f.print(x.Ellipsis, token.ELLIPSIS)
+	if x.Type != nil && !isTop(x.Type) {
+		f.expr(x.Type)
+	}
+}
+
+// expr formats x at the lowest precedence and unit depth.
+func (f *formatter) expr(x ast.Expr) {
+	const depth = 1
+	f.expr1(x, token.LowestPrec, depth)
+}
+
+// expr0 formats x at the lowest precedence with an explicit depth.
+func (f *formatter) expr0(x ast.Expr, depth int) {
+	f.expr1(x, token.LowestPrec, depth)
+}
+
+// expr1 formats expr, handling surrounding comments via before/after.
+func (f *formatter) expr1(expr ast.Expr, prec1, depth int) {
+	if f.before(expr) {
+		f.exprRaw(expr, prec1, depth)
+	}
+	f.after(expr)
+}
+
+// exprRaw formats expr without comment handling. prec1 is the precedence of
+// the surrounding operator (used to decide parenthesization); depth is the
+// expression nesting level used for spacing decisions.
+func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) {
+
+	switch x := expr.(type) {
+	case *ast.BadExpr:
+		f.print(x.From, "_|_")
+
+	case *ast.BottomLit:
+		f.print(x.Bottom, token.BOTTOM)
+
+	case *ast.Alias:
+		// Aliases in expression positions are printed in short form.
+		f.label(x.Ident, false)
+		f.print(x.Equal, token.BIND)
+		f.expr(x.Expr)
+
+	case *ast.Ident:
+		f.print(x.NamePos, x)
+
+	case *ast.BinaryExpr:
+		if depth < 1 {
+			f.internalError("depth < 1:", depth)
+			depth = 1
+		}
+		f.binaryExpr(x, prec1, cutoff(x, depth), depth)
+
+	case *ast.UnaryExpr:
+		const prec = token.UnaryPrec
+		if prec < prec1 {
+			// parenthesis needed
+			f.print(token.LPAREN, nooverride)
+			f.expr(x)
+			f.print(token.RPAREN)
+		} else {
+			// no parenthesis needed
+			f.print(x.OpPos, x.Op, nooverride)
+			f.expr1(x.X, prec, depth)
+		}
+
+	case *ast.BasicLit:
+		f.print(x.ValuePos, x)
+
+	case *ast.Interpolation:
+		f.before(nil)
+		for _, x := range x.Elts {
+			f.expr0(x, depth+1)
+		}
+		f.after(nil)
+
+	case *ast.ParenExpr:
+		if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
+			// don't print parentheses around an already parenthesized expression
+			// TODO: consider making this more general and incorporate precedence levels
+			f.expr0(x.X, depth)
+		} else {
+			f.print(x.Lparen, token.LPAREN)
+			f.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
+			f.print(x.Rparen, token.RPAREN)
+		}
+
+	case *ast.SelectorExpr:
+		f.selectorExpr(x, depth)
+
+	case *ast.IndexExpr:
+		f.expr1(x.X, token.HighestPrec, 1)
+		f.print(x.Lbrack, token.LBRACK)
+		f.expr0(x.Index, depth+1)
+		f.print(x.Rbrack, token.RBRACK)
+
+	case *ast.SliceExpr:
+		f.expr1(x.X, token.HighestPrec, 1)
+		f.print(x.Lbrack, token.LBRACK)
+		indices := []ast.Expr{x.Low, x.High}
+		for i, y := range indices {
+			if i > 0 {
+				// blanks around ":" if both sides exist and either side is a binary expression
+				x := indices[i-1]
+				if depth <= 1 && x != nil && y != nil && (isBinary(x) || isBinary(y)) {
+					f.print(blank, token.COLON, blank)
+				} else {
+					f.print(token.COLON)
+				}
+			}
+			if y != nil {
+				f.expr0(y, depth+1)
+			}
+		}
+		f.print(x.Rbrack, token.RBRACK)
+
+	case *ast.CallExpr:
+		if len(x.Args) > 1 {
+			depth++
+		}
+		wasIndented := f.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+		f.print(x.Lparen, token.LPAREN)
+		f.walkArgsList(x.Args, depth)
+		f.print(trailcomma, noblank, x.Rparen, token.RPAREN)
+		if wasIndented {
+			f.print(unindent)
+		}
+
+	case *ast.StructLit:
+		var l line
+		ws := noblank
+		ff := f.formfeed()
+
+		switch {
+		case len(x.Elts) == 0:
+			if !x.Rbrace.HasRelPos() {
+				// collapse curly braces if the body is empty.
+				ffAlt := blank | nooverride
+				for _, c := range x.Comments() {
+					if c.Position == 1 {
+						ffAlt = ff
+					}
+				}
+				ff = ffAlt
+			}
+		case !x.Rbrace.HasRelPos() || !x.Elts[0].Pos().HasRelPos():
+			ws |= newline | nooverride
+		}
+		// l captures the output line at the opening brace so we can tell
+		// below whether the struct ended up spanning multiple lines.
+		f.print(x.Lbrace, token.LBRACE, &l, ws, ff, indent)
+
+		f.walkDeclList(x.Elts)
+		f.matchUnindent()
+
+		ws = noblank
+		if f.lineout != l {
+			ws |= newline
+			if f.lastTok != token.RBRACE && f.lastTok != token.RBRACK {
+				ws |= nooverride
+			}
+		}
+		f.print(ws, x.Rbrace, token.RBRACE)
+
+	case *ast.ListLit:
+		f.print(x.Lbrack, token.LBRACK, indent)
+		f.walkListElems(x.Elts)
+		f.print(trailcomma, noblank)
+		f.visitComments(f.current.pos)
+		f.matchUnindent()
+		f.print(noblank, x.Rbrack, token.RBRACK)
+
+	case *ast.Ellipsis:
+		f.ellipsis(x)
+
+	default:
+		panic(fmt.Sprintf("unimplemented type %T", x))
+	}
+}
+
+// clause formats a single comprehension clause (for, if, or let). It panics
+// on unknown clause types.
+func (f *formatter) clause(clause ast.Clause) {
+	switch n := clause.(type) {
+	case *ast.ForClause:
+		f.print(n.For, "for", blank)
+		f.print(indent)
+		if n.Key != nil {
+			f.label(n.Key, false)
+			f.print(n.Colon, token.COMMA, blank)
+		} else {
+			f.current.pos++
+			f.visitComments(f.current.pos)
+		}
+		f.label(n.Value, false)
+		f.print(blank, n.In, "in", blank)
+		f.expr(n.Source)
+		f.markUnindentLine()
+
+	case *ast.IfClause:
+		f.print(n.If, "if", blank)
+		f.print(indent)
+		f.expr(n.Condition)
+		f.markUnindentLine()
+
+	case *ast.LetClause:
+		f.print(n.Let, token.LET, blank, nooverride)
+		f.print(indent)
+		f.expr(n.Ident)
+		f.print(blank, nooverride, n.Equal, token.BIND, blank)
+		f.expr(n.Expr)
+		f.markUnindentLine()
+
+	default:
+		panic("unknown clause type")
+	}
+}
+
+// walkBinary inspects the binary expression tree rooted at e and reports
+// which precedence levels (6, 7, 8) occur, plus the highest level at which a
+// binary operator abuts a unary operator in a way that would be confusing or
+// ambiguous without a space (maxProblem).
+func walkBinary(e *ast.BinaryExpr) (has6, has7, has8 bool, maxProblem int) {
+	switch e.Op.Precedence() {
+	case 6:
+		has6 = true
+	case 7:
+		has7 = true
+	case 8:
+		has8 = true
+	}
+
+	switch l := e.X.(type) {
+	case *ast.BinaryExpr:
+		if l.Op.Precedence() < e.Op.Precedence() {
+			// parens will be inserted.
+			// pretend this is an *syntax.ParenExpr and do nothing.
+			break
+		}
+		h6, h7, h8, mp := walkBinary(l)
+		has6 = has6 || h6
+		has7 = has7 || h7
+		has8 = has8 || h8
+		if maxProblem < mp {
+			maxProblem = mp
+		}
+	}
+
+	switch r := e.Y.(type) {
+	case *ast.BinaryExpr:
+		if r.Op.Precedence() <= e.Op.Precedence() {
+			// parens will be inserted.
+			// pretend this is an *syntax.ParenExpr and do nothing.
+			break
+		}
+		h6, h7, h8, mp := walkBinary(r)
+		has6 = has6 || h6
+		has7 = has7 || h7
+		has8 = has8 || h8
+		if maxProblem < mp {
+			maxProblem = mp
+		}
+
+	case *ast.UnaryExpr:
+		// Adjacent operator characters such as "/*" would form a different
+		// token without an intervening space.
+		switch e.Op.String() + r.Op.String() {
+		case "/*":
+			maxProblem = 8
+		case "++", "--":
+			if maxProblem < 6 {
+				maxProblem = 6
+			}
+		}
+	}
+	return
+}
+
+// cutoff computes the precedence level at and above which no blanks are
+// printed around binary operators, per the rules documented on binaryExpr.
+func cutoff(e *ast.BinaryExpr, depth int) int {
+	has6, has7, has8, maxProblem := walkBinary(e)
+	if maxProblem > 0 {
+		return maxProblem + 1
+	}
+	if (has6 || has7) && has8 {
+		if depth == 1 {
+			return 8
+		}
+		if has7 {
+			return 7
+		}
+		return 6
+	}
+	if has6 && has7 {
+		if depth == 1 {
+			return 7
+		}
+		return 6
+	}
+	if depth == 1 {
+		return 8
+	}
+	return 6
+}
+
+// diffPrec returns 1 unless expr is a binary expression at exactly
+// precedence prec, in which case it returns 0 (no extra depth for chains of
+// the same operator level).
+func diffPrec(expr ast.Expr, prec int) int {
+	x, ok := expr.(*ast.BinaryExpr)
+	if !ok || prec != x.Op.Precedence() {
+		return 1
+	}
+	return 0
+}
+
+// reduceDepth decrements depth, never going below 1.
+func reduceDepth(depth int) int {
+	depth--
+	if depth < 1 {
+		depth = 1
+	}
+	return depth
+}
+
+// Format the binary expression: decide the cutoff and then format.
+// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
+// (Algorithm suggestion by Russ Cox.)
+//
+// The precedences are:
+//	7             *  /  % quo rem div mod
+//	6             +  -
+//	5             ==  !=  <  <=  >  >=
+//	4             &&
+//	3             ||
+//	2             &
+//	1             |
+//
+// The only decision is whether there will be spaces around levels 6 and 7.
+// There are never spaces at level 8 (unary), and always spaces at levels 5 and below.
+//
+// To choose the cutoff, look at the whole expression but excluding primary
+// expressions (function calls, parenthesized exprs), and apply these rules:
+//
+//	1) If there is a binary operator with a right side unary operand
+//	   that would clash without a space, the cutoff must be (in order):
+//
+//		/*	8
+//		++	7 // not necessary, but to avoid confusion
+//		--	7
+//
+//         (Comparison operators always have spaces around them.)
+//
+//	2) If there is a mix of level 7 and level 6 operators, then the cutoff
+//	   is 7 (use spaces to distinguish precedence) in Normal mode
+//	   and 6 (never use spaces) in Compact mode.
+//
+//	3) If there are no level 6 operators or no level 7 operators, then the
+//	   cutoff is 8 (always use spaces) in Normal mode
+//	   and 6 (never use spaces) in Compact mode.
+//
+func (f *formatter) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
+	f.nestExpr++
+	defer func() { f.nestExpr-- }()
+
+	prec := x.Op.Precedence()
+	if prec < prec1 {
+		// parenthesis needed
+		// Note: The parser inserts an syntax.ParenExpr node; thus this case
+		// can only occur if the AST is created in a different way.
+		// defer p.pushComment(nil).pop()
+		f.print(token.LPAREN, nooverride)
+		f.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
+		f.print(token.RPAREN)
+		return
+	}
+
+	// Blanks surround the operator only below the computed cutoff level.
+	printBlank := prec < cutoff
+
+	f.expr1(x.X, prec, depth+diffPrec(x.X, prec))
+	f.print(nooverride)
+	if printBlank {
+		f.print(blank)
+	}
+	f.print(x.OpPos, x.Op)
+	if x.Y.Pos().IsNewline() {
+		// at least one line break, but respect an extra empty line
+		// in the source
+		f.print(formfeed)
+		printBlank = false // no blank after line break
+	} else {
+		f.print(nooverride)
+	}
+	if printBlank {
+		f.print(blank)
+	}
+	f.expr1(x.Y, prec+1, depth+1)
+}
+
+// isBinary reports whether expr is a binary expression.
+func isBinary(expr ast.Expr) bool {
+	_, ok := expr.(*ast.BinaryExpr)
+	return ok
+}
+
+// possibleSelectorExpr formats expr, dispatching to selectorExpr when it is
+// a selector; it returns whether the selector was printed indented across
+// multiple lines (false for all other expression kinds).
+func (f *formatter) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool {
+	if x, ok := expr.(*ast.SelectorExpr); ok {
+		return f.selectorExpr(x, depth)
+	}
+	f.expr1(expr, prec1, depth)
+	return false
+}
+
+// selectorExpr handles an *syntax.SelectorExpr node and returns whether x spans
+// multiple lines.
+func (f *formatter) selectorExpr(x *ast.SelectorExpr, depth int) bool {
+	f.expr1(x.X, token.HighestPrec, depth)
+	f.print(token.PERIOD)
+	if x.Sel.Pos().IsNewline() {
+		f.print(indent, formfeed)
+		f.expr(x.Sel.(ast.Expr))
+		f.print(unindent)
+		return true
+	}
+	f.print(noblank)
+	f.expr(x.Sel.(ast.Expr))
+	return false
+}
+
+// isTop reports whether e is the top value "_".
+func isTop(e ast.Expr) bool {
+	ident, ok := e.(*ast.Ident)
+	return ok && ident.Name == "_"
+}
diff --git a/vendor/cuelang.org/go/cue/format/printer.go b/vendor/cuelang.org/go/cue/format/printer.go
new file mode 100644
index 0000000000..a43154fa6c
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format/printer.go
@@ -0,0 +1,424 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package format
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "text/tabwriter"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+)
+
+// A printer takes the stream of formatting tokens and spacing directives
+// produced by the formatter and adjusts the spacing based on the original
+// source code.
+type printer struct {
+	cfg *config // formatting options
+
+	allowed     whiteSpace   // whitespace permitted before the next token
+	requested   whiteSpace   // whitespace explicitly requested
+	indentStack []whiteSpace // pending per-line indent directives
+
+	pos     token.Position // current pos in AST
+	lineout line           // number of lines written so far
+
+	lastTok token.Token // last token printed (syntax.ILLEGAL if it's whitespace)
+
+	output      []byte // accumulated formatted output
+	indent      int    // current indentation level
+	spaceBefore bool   // whether a space precedes the insertion point
+
+	errs errors.Error
+}
+
+// line counts output lines, used to detect whether a construct ended up
+// spanning multiple lines.
+type line int
+
+// init prepares the printer for use with the given configuration.
+func (p *printer) init(cfg *config) {
+	p.cfg = cfg
+	p.pos = token.Position{Line: 1, Column: 1}
+}
+
+// errf records a formatting error at n's position.
+func (p *printer) errf(n ast.Node, format string, args ...interface{}) {
+	p.errs = errors.Append(p.errs, errors.Newf(n.Pos(), format, args...))
+}
+
+const debug = false
+
+// internalError reports an internal inconsistency; it prints and panics only
+// when the debug constant is enabled, and is a no-op otherwise.
+func (p *printer) internalError(msg ...interface{}) {
+	if debug {
+		fmt.Print(p.pos.String() + ": ")
+		fmt.Println(msg...)
+		panic("go/printer")
+	}
+}
+
+// lineFor returns the source line of pos.
+func (p *printer) lineFor(pos token.Pos) int {
+	return pos.Line()
+}
+
+// Print renders one item of the formatter's output stream — a token,
+// literal, AST leaf, whitespace directive, or position — into the output
+// buffer, normalizing literals (integer bases, float padding, exponent case)
+// along the way.
+func (p *printer) Print(v interface{}) {
+	var (
+		impliedComma = false
+		isLit        bool
+		data         string
+		nextWS       whiteSpace
+	)
+	switch x := v.(type) {
+	case *line:
+		// Report the current output line back to the caller.
+		*x = p.lineout
+
+	case token.Token:
+		s := x.String()
+		before, after := mayCombine(p.lastTok, x)
+		if before && !p.spaceBefore {
+			// the previous and the current token must be
+			// separated by a blank otherwise they combine
+			// into a different incorrect token sequence
+			// (except for syntax.INT followed by a '.' this
+			// should never happen because it is taken care
+			// of via binary expression formatting)
+			if p.allowed&blank != 0 {
+				p.internalError("whitespace buffer not empty")
+			}
+			p.allowed |= blank
+		}
+		if after {
+			nextWS = blank
+		}
+		data = s
+		switch x {
+		case token.EOF:
+			data = ""
+			p.allowed = newline
+			p.allowed &^= newsection
+		case token.LPAREN, token.LBRACK, token.LBRACE:
+		case token.RPAREN, token.RBRACK, token.RBRACE:
+			impliedComma = true
+		}
+		p.lastTok = x
+
+	case *ast.BasicLit:
+		data = x.Value
+		switch x.Kind {
+		case token.STRING:
+			// TODO: only do this when simplifying. Right now this does not
+			// give the right result, but it should be better if:
+			// 1) simplification is done as a separate step
+			// 2) simplified structs are explicitly referenced separately
+			//    in the AST.
+			if p.indent < 6 {
+				data = literal.IndentTabs(data, p.cfg.Indent+p.indent+1)
+			}
+
+		case token.INT:
+			// Normalize legacy octal "0123" to "0o123".
+			if len(data) > 1 &&
+				data[0] == '0' &&
+				data[1] >= '0' && data[1] <= '9' {
+				data = "0o" + data[1:]
+			}
+			// Pad trailing dot before multiplier.
+			if p := strings.IndexByte(data, '.'); p >= 0 && data[p+1] > '9' {
+				data = data[:p+1] + "0" + data[p+1:]
+			}
+			// Lowercase E, but only if it is not the last character: in the
+			// future we may use E for Exa.
+			if p := strings.IndexByte(data, 'E'); p != -1 && p < len(data)-1 {
+				data = strings.ToLower(data)
+			}
+
+		case token.FLOAT:
+			// Pad leading or trailing dots.
+			switch p := strings.IndexByte(data, '.'); {
+			case p < 0:
+			case p == 0:
+				data = "0" + data
+			case p == len(data)-1:
+				data += "0"
+			case data[p+1] > '9':
+				data = data[:p+1] + "0" + data[p+1:]
+			}
+			if strings.IndexByte(data, 'E') != -1 {
+				data = strings.ToLower(data)
+			}
+		}
+
+		isLit = true
+		impliedComma = true
+		p.lastTok = x.Kind
+
+	case *ast.Ident:
+		data = x.Name
+		if !ast.IsValidIdent(data) {
+			p.errf(x, "invalid identifier %q", x.Name)
+			data = "*bad identifier*"
+		}
+		impliedComma = true
+		p.lastTok = token.IDENT
+
+	case string:
+		data = x
+		impliedComma = true
+		p.lastTok = token.STRING
+
+	case *ast.CommentGroup:
+		// A comment group only adjusts the allowed whitespace; the comment
+		// text itself is printed via *ast.Comment below.
+		rel := x.Pos().RelPos()
+		if x.Line { // TODO: we probably don't need this.
+			rel = token.Blank
+		}
+		switch rel {
+		case token.NoRelPos:
+		case token.Newline, token.NewSection:
+		case token.Blank, token.Elided:
+			p.allowed |= blank
+			fallthrough
+		case token.NoSpace:
+			p.allowed &^= newline | newsection | formfeed | declcomma
+		}
+		return
+
+	case *ast.Attribute:
+		data = x.Text
+		impliedComma = true
+		p.lastTok = token.ATTRIBUTE
+
+	case *ast.Comment:
+		// TODO: if implied comma, postpone comment
+		data = x.Text
+		p.lastTok = token.COMMENT
+
+	case whiteSpace:
+		p.allowed |= x
+		return
+
+	case token.Pos:
+		// TODO: should we use a known file position to synchronize? Go does,
+		// but we don't really have to.
+		// pos := x
+		if x.HasRelPos() {
+			if p.allowed&nooverride == 0 {
+				requested := p.allowed
+				switch x.RelPos() {
+				case token.NoSpace:
+					requested &^= newline | newsection | formfeed
+				case token.Blank:
+					requested |= blank
+					requested &^= newline | newsection | formfeed
+				case token.Newline:
+					requested |= newline
+				case token.NewSection:
+					requested |= newsection
+				}
+				p.writeWhitespace(requested)
+				p.allowed = 0
+				p.requested = 0
+			}
+			// p.pos = pos
+		}
+		return
+
+	default:
+		fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", x, x)
+		panic("go/printer type")
+	}
+
+	p.writeWhitespace(p.allowed)
+	p.allowed = 0
+	p.requested = 0
+	p.writeString(data, isLit)
+	p.allowed = nextWS
+	_ = impliedComma // TODO: delay comment printings
+}
+
+// writeWhitespace emits the pending whitespace directives in ws: commas,
+// indentation bookkeeping, and exactly one of the mutually exclusive
+// separators (newsection > formfeed > newline > declcomma > noblank > vtab >
+// blank, in priority order).
+func (p *printer) writeWhitespace(ws whiteSpace) {
+	if ws&comma != 0 {
+		switch {
+		case ws&(newsection|newline|formfeed) != 0,
+			ws&trailcomma == 0:
+			p.writeByte(',', 1)
+		}
+	}
+	if ws&indent != 0 {
+		p.markLineIndent(ws)
+	}
+	if ws&unindent != 0 {
+		p.markUnindentLine()
+	}
+	switch {
+	case ws&newsection != 0:
+		p.maybeIndentLine(ws)
+		p.writeByte('\f', 2)
+		p.lineout += 2
+		p.spaceBefore = true
+	case ws&formfeed != 0:
+		p.maybeIndentLine(ws)
+		p.writeByte('\f', 1)
+		p.lineout++
+		p.spaceBefore = true
+	case ws&newline != 0:
+		p.maybeIndentLine(ws)
+		p.writeByte('\n', 1)
+		p.lineout++
+		p.spaceBefore = true
+	case ws&declcomma != 0:
+		p.writeByte(',', 1)
+		p.writeByte(' ', 1)
+		p.spaceBefore = true
+	case ws&noblank != 0:
+	case ws&vtab != 0:
+		p.writeByte('\v', 1)
+		p.spaceBefore = true
+	case ws&blank != 0:
+		p.writeByte(' ', 1)
+		p.spaceBefore = true
+	}
+}
+
+// markLineIndent records a pending indent; the indent level is only applied
+// once a line break is actually written (see maybeIndentLine).
+func (p *printer) markLineIndent(ws whiteSpace) {
+	p.indentStack = append(p.indentStack, ws)
+}
+
+// markUnindentLine pops the most recent indent marker and decrements the
+// indent level only if that marker was actually applied.
+func (p *printer) markUnindentLine() (wasUnindented bool) {
+	last := len(p.indentStack) - 1
+	if ws := p.indentStack[last]; ws&indented != 0 {
+		p.indent--
+		wasUnindented = true
+	}
+	p.indentStack = p.indentStack[:last]
+	return wasUnindented
+}
+
+// maybeIndentLine applies the topmost pending indent marker, if any, before
+// a line break is written.
+func (p *printer) maybeIndentLine(ws whiteSpace) {
+	if ws&unindent == 0 && len(p.indentStack) > 0 {
+		last := len(p.indentStack) - 1
+		if ws := p.indentStack[last]; ws&indented != 0 || ws&indent == 0 {
+			return
+		}
+		p.indentStack[last] |= indented
+		p.indent++
+	}
+}
+
+// matchUnindent requests an unindent before the next token.
+func (f *formatter) matchUnindent() whiteSpace {
+	f.allowed |= unindent
+	// TODO: make this work. Whitespace from closing bracket should match that
+	// of opening if there is no position information.
+	// f.allowed &^= nooverride | newline | newsection | formfeed | blank | noblank
+	// ws := f.indentStack[len(f.indentStack)-1]
+	// mask := blank | noblank | vtab
+	// f.allowed |= unindent | blank | noblank
+	// if ws&newline != 0 || ws*indented != 0 {
+	// 	f.allowed |= newline
+	// }
+	return 0
+}
+
+// writeString writes the string s to p.output and updates p.pos, p.out,
+// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
+// to protect s from being interpreted by the tabwriter.
+//
+// Note: writeString is only used to write Go tokens, literals, and
+// comments, all of which must be written literally. Thus, it is correct
+// to always set isLit = true. However, setting it explicitly only when
+// needed (i.e., when we don't know that s contains no tabs or line breaks)
+// avoids processing extra escape characters and reduces run time of the
+// printer benchmark by up to 10%.
+//
+func (p *printer) writeString(s string, isLit bool) {
+	if s != "" {
+		p.spaceBefore = false
+	}
+
+	if isLit {
+		// Protect s such that is passes through the tabwriter
+		// unchanged. Note that valid Go programs cannot contain
+		// tabwriter.Escape bytes since they do not appear in legal
+		// UTF-8 sequences.
+		p.output = append(p.output, tabwriter.Escape)
+	}
+
+	p.output = append(p.output, s...)
+
+	if isLit {
+		p.output = append(p.output, tabwriter.Escape)
+	}
+	// update positions
+	nLines := 0
+	var li int // index of last newline; valid if nLines > 0
+	for i := 0; i < len(s); i++ {
+		// CUE tokens cannot contain '\f' - no need to look for it
+		if s[i] == '\n' {
+			nLines++
+			li = i
+		}
+	}
+	p.pos.Offset += len(s)
+	if nLines > 0 {
+		p.pos.Line += nLines
+		c := len(s) - li
+		p.pos.Column = c
+	} else {
+		p.pos.Column += len(s)
+	}
+}
+
+// writeByte appends n copies of ch to the output and updates the position.
+// After a newline or formfeed it also emits the current indentation as tabs.
+func (p *printer) writeByte(ch byte, n int) {
+	for i := 0; i < n; i++ {
+		p.output = append(p.output, ch)
+	}
+
+	// update positions
+	p.pos.Offset += n
+	if ch == '\n' || ch == '\f' {
+		p.pos.Line += n
+		p.pos.Column = 1
+
+		n := p.cfg.Indent + p.indent // include base indentation
+		for i := 0; i < n; i++ {
+			p.output = append(p.output, '\t')
+		}
+
+		// update positions
+		p.pos.Offset += n
+		p.pos.Column += n
+
+		return
+	}
+	p.pos.Column += n
+}
+
+// mayCombine reports whether prev and next would lex as a different token
+// sequence if printed without an intervening space (before), and whether a
+// blank is desired after next (after).
+func mayCombine(prev, next token.Token) (before, after bool) {
+	s := next.String()
+	// Keyword-like tokens always get surrounding blanks.
+	if 'a' <= s[0] && s[0] < 'z' {
+		return true, true
+	}
+	switch prev {
+	case token.IQUO, token.IREM, token.IDIV, token.IMOD:
+		return false, false
+	case token.INT:
+		before = next == token.PERIOD // 1.
+	case token.ADD:
+		before = s[0] == '+' // ++
+	case token.SUB:
+		before = s[0] == '-' // --
+	case token.QUO:
+		before = s[0] == '*' // /*
+	}
+	return before, false
+}
diff --git a/vendor/cuelang.org/go/cue/format/simplify.go b/vendor/cuelang.org/go/cue/format/simplify.go
new file mode 100644
index 0000000000..f4981978cc
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/format/simplify.go
@@ -0,0 +1,113 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package format
+
+import (
+ "strconv"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/internal"
+)
+
+// labelSimplifier rewrites string labels to identifiers if
+// no identifiers will subsequently bind to the exposed label.
+// In other words, string labels are only replaced if this does
+// not change the semantics of the CUE code.
+type labelSimplifier struct {
+ parent *labelSimplifier
+ scope map[string]bool
+}
+
+// processDecls runs the three simplification passes over decls in a
+// fresh child scope: first collect candidate label names (markStrings),
+// then clear candidates that identifiers would bind to (markReferences),
+// and finally rewrite the labels that remained safe (replace).
+func (s *labelSimplifier) processDecls(decls []ast.Decl) {
+ sc := labelSimplifier{parent: s, scope: map[string]bool{}}
+ for _, d := range decls {
+ switch x := d.(type) {
+ case *ast.Field:
+ ast.Walk(x.Label, sc.markStrings, nil)
+ }
+ }
+
+ for _, d := range decls {
+ switch x := d.(type) {
+ case *ast.Field:
+ ast.Walk(x.Value, sc.markReferences, nil)
+ default:
+ ast.Walk(x, sc.markReferences, nil)
+ }
+ }
+
+ for _, d := range decls {
+ switch x := d.(type) {
+ case *ast.Field:
+ x.Label = astutil.Apply(x.Label, sc.replace, nil).(ast.Label)
+ }
+ }
+}
+
+// markReferences walks n and disables simplification (sets the scope
+// entry to false) for every name that is referenced by an identifier.
+// Files and struct literals are processed in their own child scopes.
+func (s *labelSimplifier) markReferences(n ast.Node) bool {
+ // Record strings at this level.
+ switch x := n.(type) {
+ case *ast.File:
+ s.processDecls(x.Decls)
+ return false
+
+ case *ast.StructLit:
+ s.processDecls(x.Elts)
+ return false
+
+ case *ast.SelectorExpr:
+ // Only the operand can hold references; the selector itself is
+ // a field name, not a binding reference.
+ ast.Walk(x.X, s.markReferences, nil)
+ return false
+
+ case *ast.Ident:
+ // Pin the name in the nearest scope that tracks it.
+ for c := s; c != nil; c = c.parent {
+ if _, ok := c.scope[x.Name]; ok {
+ c.scope[x.Name] = false
+ break
+ }
+ }
+ }
+ return true
+}
+
+// markStrings records label names that are candidates for rewriting to
+// identifiers. A quoted string qualifies only if it unquotes to a valid
+// identifier that is neither a definition nor hidden; list and
+// interpolation labels are never simplified.
+func (s *labelSimplifier) markStrings(n ast.Node) bool {
+ switch x := n.(type) {
+ case *ast.BasicLit:
+ str, err := strconv.Unquote(x.Value)
+ if err != nil || !ast.IsValidIdent(str) || internal.IsDefOrHidden(str) {
+ return false
+ }
+ s.scope[str] = true
+
+ case *ast.Ident:
+ s.scope[x.Name] = true
+
+ case *ast.ListLit, *ast.Interpolation:
+ return false
+ }
+ return true
+}
+
+// replace rewrites a quoted string label to a plain identifier when the
+// name survived the marking passes (its scope entry is still true).
+func (s *labelSimplifier) replace(c astutil.Cursor) bool {
+ switch x := c.Node().(type) {
+ case *ast.BasicLit:
+ str, err := strconv.Unquote(x.Value)
+ if err == nil && s.scope[str] && !internal.IsDefOrHidden(str) {
+ c.Replace(ast.NewIdent(str))
+ }
+ }
+ return true
+}
diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go
new file mode 100644
index 0000000000..0fe93c49fd
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/instance.go
@@ -0,0 +1,357 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/compile"
+ "cuelang.org/go/internal/core/runtime"
+)
+
+// An InstanceOrValue is implemented by Value and *Instance.
+//
+// This is a placeholder type that is used to allow Instance-based APIs to
+// transition to Value-based APIs. The goals is to get rid of the Instance
+// type before v1.0.0.
+type InstanceOrValue interface {
+ Value() Value
+
+ internal()
+}
+
+func (Value) internal() {}
+func (*Instance) internal() {}
+
+// Value implements value.Instance.
+func (v hiddenValue) Value() Value { return v }
+
+// An Instance defines a single configuration based on a collection of
+// underlying CUE files.
+type Instance struct {
+ index *runtime.Runtime
+
+ root *adt.Vertex
+
+ ImportPath string
+ Dir string
+ PkgName string
+ DisplayName string
+
+ Incomplete bool // true if the package or one of its dependencies had errors
+ Err errors.Error // non-nil if the package had errors
+
+ inst *build.Instance
+}
+
+type hiddenInstance = Instance
+
+func addInst(x *runtime.Runtime, p *Instance) *Instance {
+ if p.inst == nil {
+ p.inst = &build.Instance{
+ ImportPath: p.ImportPath,
+ PkgName: p.PkgName,
+ }
+ }
+ x.AddInst(p.ImportPath, p.root, p.inst)
+ x.SetBuildData(p.inst, p)
+ p.index = x
+ return p
+}
+
+func lookupInstance(x *runtime.Runtime, p *build.Instance) *Instance {
+ if x, ok := x.BuildData(p); ok {
+ return x.(*Instance)
+ }
+ return nil
+}
+
+func getImportFromBuild(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance {
+ inst := lookupInstance(x, p)
+
+ if inst != nil {
+ return inst
+ }
+
+ inst = &Instance{
+ ImportPath: p.ImportPath,
+ Dir: p.Dir,
+ PkgName: p.PkgName,
+ DisplayName: p.ImportPath,
+ root: v,
+ inst: p,
+ index: x,
+ }
+ if p.Err != nil {
+ inst.setListOrError(p.Err)
+ }
+
+ x.SetBuildData(p, inst)
+
+ return inst
+}
+
+func getImportFromNode(x *runtime.Runtime, v *adt.Vertex) *Instance {
+ p := x.GetInstanceFromNode(v)
+ if p == nil {
+ return nil
+ }
+
+ return getImportFromBuild(x, p, v)
+}
+
+func getImportFromPath(x *runtime.Runtime, id string) *Instance {
+ node := x.LoadImport(id)
+ if node == nil {
+ return nil
+ }
+ b := x.GetInstanceFromNode(node)
+ inst := lookupInstance(x, b)
+ if inst == nil {
+ inst = &Instance{
+ ImportPath: b.ImportPath,
+ PkgName: b.PkgName,
+ root: node,
+ inst: b,
+ index: x,
+ }
+ }
+ return inst
+}
+
+func init() {
+ internal.MakeInstance = func(value interface{}) interface{} {
+ v := value.(Value)
+ x := v.eval(v.ctx())
+ st, ok := x.(*adt.Vertex)
+ if !ok {
+ st = &adt.Vertex{}
+ st.AddConjunct(adt.MakeRootConjunct(nil, x))
+ }
+ return addInst(v.idx, &Instance{
+ root: st,
+ })
+ }
+}
+
+// newInstance creates a new instance. Use Insert to populate the instance.
+func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance {
+ // TODO: associate root source with structLit.
+ inst := &Instance{
+ root: v,
+ inst: p,
+ }
+ if p != nil {
+ inst.ImportPath = p.ImportPath
+ inst.Dir = p.Dir
+ inst.PkgName = p.PkgName
+ inst.DisplayName = p.ImportPath
+ if p.Err != nil {
+ inst.setListOrError(p.Err)
+ }
+ }
+
+ // NOTE(review): p is dereferenced unconditionally below even though the
+ // guard above allows p == nil; the visible caller (Build) always passes
+ // a non-nil p — confirm before relying on a nil argument here.
+ x.AddInst(p.ImportPath, v, p)
+ x.SetBuildData(p, inst)
+ inst.index = x
+ return inst
+}
+
+func (inst *Instance) setListOrError(err errors.Error) {
+ inst.Incomplete = true
+ inst.Err = errors.Append(inst.Err, err)
+}
+
+func (inst *Instance) setError(err errors.Error) {
+ inst.Incomplete = true
+ inst.Err = errors.Append(inst.Err, err)
+}
+
+func (inst *Instance) eval(ctx *adt.OpContext) adt.Value {
+ // TODO: remove manifest here?
+ v := manifest(ctx, inst.root)
+ return v
+}
+
+// ID returns the package identifier that uniquely qualifies module and
+// package name.
+func (inst *Instance) ID() string {
+ if inst == nil || inst.inst == nil {
+ return ""
+ }
+ return inst.inst.ID()
+}
+
+// Doc returns the package comments for this instance.
+//
+// Deprecated: use inst.Value().Doc()
+func (inst *hiddenInstance) Doc() []*ast.CommentGroup {
+ return inst.Value().Doc()
+}
+
+// Value returns the root value of the configuration. If the configuration
+// defines in emit value, it will be that value. Otherwise it will be all
+// top-level values.
+func (inst *Instance) Value() Value {
+ ctx := newContext(inst.index)
+ inst.root.Finalize(ctx)
+ return newVertexRoot(inst.index, ctx, inst.root)
+}
+
+// Eval evaluates an expression within an existing instance.
+//
+// Expressions may refer to builtin packages if they can be uniquely identified.
+//
+// Deprecated: use
+// inst.Value().Context().BuildExpr(expr, Scope(inst.Value), InferBuiltins(true))
+func (inst *hiddenInstance) Eval(expr ast.Expr) Value {
+ v := inst.Value()
+ return v.Context().BuildExpr(expr, Scope(v), InferBuiltins(true))
+}
+
+// DO NOT USE.
+//
+// Deprecated: do not use.
+func Merge(inst ...*Instance) *Instance {
+ v := &adt.Vertex{}
+
+ i := inst[0]
+ ctx := newContext(i.index)
+
+ // TODO: interesting test: use actual unification and then on K8s corpus.
+
+ for _, i := range inst {
+ w := i.Value()
+ v.AddConjunct(adt.MakeRootConjunct(nil, w.v.ToDataAll()))
+ }
+ v.Finalize(ctx)
+
+ p := addInst(i.index, &Instance{
+ root: v,
+ })
+ return p
+}
+
+// Build creates a new instance from the build instances, allowing unbound
+// identifier to bind to the top-level field in inst. The top-level fields in
+// inst take precedence over predeclared identifier and builtin functions.
+//
+// Deprecated: use Context.Build
+func (inst *hiddenInstance) Build(p *build.Instance) *Instance {
+ p.Complete()
+
+ idx := inst.index
+ r := inst.index
+
+ rErr := r.ResolveFiles(p)
+
+ cfg := &compile.Config{Scope: valueScope(Value{idx: r, v: inst.root})}
+ v, err := compile.Files(cfg, r, p.ID(), p.Files...)
+
+ v.AddConjunct(adt.MakeRootConjunct(nil, inst.root))
+
+ i := newInstance(idx, p, v)
+ if rErr != nil {
+ i.setListOrError(rErr)
+ }
+ if i.Err != nil {
+ i.setListOrError(i.Err)
+ }
+
+ if err != nil {
+ i.setListOrError(err)
+ }
+
+ return i
+}
+
+func (inst *Instance) value() Value {
+ return newVertexRoot(inst.index, newContext(inst.index), inst.root)
+}
+
+// Lookup reports the value at a path starting from the top level struct. The
+// Exists method of the returned value will report false if the path did not
+// exist. The Err method reports if any error occurred during evaluation. The
+// empty path returns the top-level configuration struct. Use LookupDef for definitions or LookupField for
+// any kind of field.
+//
+// Deprecated: use Value.LookupPath
+func (inst *hiddenInstance) Lookup(path ...string) Value {
+ return inst.value().Lookup(path...)
+}
+
+// LookupDef reports the definition with the given name within struct v. The
+// Exists method of the returned value will report false if the definition did
+// not exist. The Err method reports if any error occurred during evaluation.
+//
+// Deprecated: use Value.LookupPath
+func (inst *hiddenInstance) LookupDef(path string) Value {
+ return inst.value().LookupDef(path)
+}
+
+// LookupField reports a Field at a path starting from v, or an error if the
+// path is not. The empty path returns v itself.
+//
+// It cannot look up hidden or unexported fields.
+//
+// Deprecated: this API does not work with new-style definitions. Use
+// FieldByName defined on inst.Value().
+//
+// Deprecated: use Value.LookupPath
+func (inst *hiddenInstance) LookupField(path ...string) (f FieldInfo, err error) {
+ v := inst.value()
+ for _, k := range path {
+ s, err := v.Struct()
+ if err != nil {
+ return f, err
+ }
+
+ f, err = s.FieldByName(k, true)
+ if err != nil {
+ return f, err
+ }
+ if f.IsHidden {
+ return f, errNotFound
+ }
+ v = f.Value
+ }
+ return f, err
+}
+
+// Fill creates a new instance with the values of the old instance unified with
+// the given value. It is not possible to update the emit value.
+//
+// Values may be any Go value that can be converted to CUE, an ast.Expr or
+// a Value. In the latter case, it will panic if the Value is not from the same
+// Runtime.
+//
+// Deprecated: use Value.FillPath()
+func (inst *hiddenInstance) Fill(x interface{}, path ...string) (*Instance, error) {
+ v := inst.Value().Fill(x, path...)
+
+ inst = addInst(inst.index, &Instance{
+ root: v.v,
+ inst: nil,
+
+ // Omit ImportPath to indicate this is not an importable package.
+ Dir: inst.Dir,
+ PkgName: inst.PkgName,
+ Incomplete: inst.Incomplete,
+ })
+ return inst, nil
+}
diff --git a/vendor/cuelang.org/go/cue/literal/doc.go b/vendor/cuelang.org/go/cue/literal/doc.go
new file mode 100644
index 0000000000..3d3095c6ce
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package literal implements conversions to and from string representations of
+// basic data types.
+package literal
diff --git a/vendor/cuelang.org/go/cue/literal/indent.go b/vendor/cuelang.org/go/cue/literal/indent.go
new file mode 100644
index 0000000000..193ca3b440
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/indent.go
@@ -0,0 +1,33 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import "strings"
+
+// IndentTabs takes a quoted string and reindents it for the given indentation.
+// If a string is not a multiline string it will return the string as is.
+func IndentTabs(s string, n int) string {
+ indent := tabs(n)
+
+ qi, _, _, err := ParseQuotes(s, s)
+ if err != nil || !qi.multiline || qi.whitespace == indent {
+ return s
+ }
+
+ search := "\n" + qi.whitespace
+ replace := "\n" + indent
+
+ return strings.ReplaceAll(s, search, replace)
+}
diff --git a/vendor/cuelang.org/go/cue/literal/num.go b/vendor/cuelang.org/go/cue/literal/num.go
new file mode 100644
index 0000000000..bb77d5b2f2
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/num.go
@@ -0,0 +1,357 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import (
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "github.com/cockroachdb/apd/v2"
+)
+
+var baseContext apd.Context
+
+func init() {
+ baseContext = apd.BaseContext
+ baseContext.Precision = 24
+}
+
+// NumInfo contains information about a parsed number.
+//
+// Reusing a NumInfo across parses may avoid memory allocations.
+type NumInfo struct {
+ pos token.Pos
+ src string
+ p int // scan position within src
+ ch byte // current byte; 0 at end of input
+ buf []byte // normalized representation of the scanned number
+
+ mul Multiplier
+ base byte
+ neg bool
+ UseSep bool // whether '_' digit separators were seen
+ isFloat bool
+ err error
+}
+
+// String returns a canonical string representation of the number so that
+// it can be parsed with math.Float.Parse.
+func (p *NumInfo) String() string {
+ if len(p.buf) > 0 && p.base == 10 && p.mul == 0 {
+ return string(p.buf)
+ }
+ var d apd.Decimal
+ _ = p.decimal(&d)
+ return d.String()
+}
+
+type decimal = apd.Decimal
+
+// Decimal is for internal use.
+func (p *NumInfo) Decimal(v *decimal) error {
+ return p.decimal(v)
+}
+
+func (p *NumInfo) decimal(v *apd.Decimal) error {
+ if p.base != 10 {
+ _, _, _ = v.SetString("0")
+ b := p.buf
+ if p.buf[0] == '-' {
+ v.Negative = p.neg
+ b = p.buf[1:]
+ }
+ v.Coeff.SetString(string(b), int(p.base))
+ return nil
+ }
+ _ = v.UnmarshalText(p.buf)
+ if p.mul != 0 {
+ _, _ = baseContext.Mul(v, v, mulToRat[p.mul])
+ cond, _ := baseContext.RoundToIntegralExact(v, v)
+ if cond.Inexact() {
+ return p.errorf("number cannot be represented as int")
+ }
+ }
+ return nil
+}
+
+// Multiplier reports which multiplier was used in an integral number.
+func (p *NumInfo) Multiplier() Multiplier {
+ return p.mul
+}
+
+// IsInt reports whether the number is an integral number.
+func (p *NumInfo) IsInt() bool {
+ return !p.isFloat
+}
+
+// ParseNum parses s and populates NumInfo with the result.
+func ParseNum(s string, n *NumInfo) error {
+ *n = NumInfo{pos: n.pos, src: s, buf: n.buf[:0]}
+ if !n.next() {
+ return n.errorf("invalid number %q", s)
+ }
+ if n.ch == '-' {
+ n.neg = true
+ n.buf = append(n.buf, '-')
+ n.next()
+ }
+ seenDecimalPoint := false
+ if n.ch == '.' {
+ n.next()
+ seenDecimalPoint = true
+ }
+ err := n.scanNumber(seenDecimalPoint)
+ if err != nil {
+ return err
+ }
+ if n.err != nil {
+ return n.err
+ }
+ if n.p < len(n.src) {
+ return n.errorf("invalid number %q", s)
+ }
+ if len(n.buf) == 0 {
+ n.buf = append(n.buf, '0')
+ }
+ return nil
+}
+
+func (p *NumInfo) errorf(format string, args ...interface{}) error {
+ return errors.Newf(p.pos, format, args...)
+}
+
+// A Multiplier indicates a multiplier indicator used in the literal.
+type Multiplier byte
+
+const (
+ mul1 Multiplier = 1 + iota
+ mul2
+ mul3
+ mul4
+ mul5
+ mul6
+ mul7
+ mul8
+
+ mulBin = 0x10
+ mulDec = 0x20
+
+ K = mulDec | mul1
+ M = mulDec | mul2
+ G = mulDec | mul3
+ T = mulDec | mul4
+ P = mulDec | mul5
+ E = mulDec | mul6
+ Z = mulDec | mul7
+ Y = mulDec | mul8
+
+ Ki = mulBin | mul1
+ Mi = mulBin | mul2
+ Gi = mulBin | mul3
+ Ti = mulBin | mul4
+ Pi = mulBin | mul5
+ Ei = mulBin | mul6
+ Zi = mulBin | mul7
+ Yi = mulBin | mul8
+)
+
+func (p *NumInfo) next() bool {
+ if p.p >= len(p.src) {
+ p.ch = 0
+ return false
+ }
+ p.ch = p.src[p.p]
+ p.p++
+ if p.ch == '.' {
+ if len(p.buf) == 0 {
+ p.buf = append(p.buf, '0')
+ }
+ p.buf = append(p.buf, '.')
+ }
+ return true
+}
+
+// digitVal returns the value of digit ch, or 16 (larger than any legal
+// digit value) if ch is not a digit. A '_' separator is reported as 0 so
+// it passes the base check, and flags UseSep; scanMantissa keeps it out
+// of the buffer.
+func (p *NumInfo) digitVal(ch byte) (d int) {
+ switch {
+ case '0' <= ch && ch <= '9':
+ d = int(ch - '0')
+ case ch == '_':
+ p.UseSep = true
+ return 0
+ case 'a' <= ch && ch <= 'f':
+ d = int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ d = int(ch - 'A' + 10)
+ default:
+ return 16 // larger than any legal digit val
+ }
+ return d
+}
+
+func (p *NumInfo) scanMantissa(base int) bool {
+ hasDigit := false
+ var last byte
+ for p.digitVal(p.ch) < base {
+ if p.ch != '_' {
+ p.buf = append(p.buf, p.ch)
+ hasDigit = true
+ }
+ last = p.ch
+ p.next()
+ }
+ if last == '_' {
+ p.err = p.errorf("illegal '_' in number")
+ }
+ return hasDigit
+}
+
+// scanNumber scans the mantissa, optional fraction, exponent, and
+// optional multiplier suffix of a number, normalizing the digits into
+// p.buf. If seenDecimalPoint is set, a leading "[-]." has already been
+// consumed by ParseNum.
+func (p *NumInfo) scanNumber(seenDecimalPoint bool) error {
+ p.base = 10
+
+ if seenDecimalPoint {
+ p.isFloat = true
+ if !p.scanMantissa(10) {
+ return p.errorf("illegal fraction %q", p.src)
+ }
+ goto exponent
+ }
+
+ if p.ch == '0' {
+ // int or float
+ p.next()
+ switch p.ch {
+ case 'x', 'X':
+ p.base = 16
+ // hexadecimal int
+ p.next()
+ if !p.scanMantissa(16) {
+ // only scanned "0x" or "0X"
+ return p.errorf("illegal hexadecimal number %q", p.src)
+ }
+ case 'b':
+ p.base = 2
+ // binary int
+ p.next()
+ if !p.scanMantissa(2) {
+ // only scanned "0b"
+ return p.errorf("illegal binary number %q", p.src)
+ }
+ case 'o':
+ p.base = 8
+ // octal int
+ p.next()
+ if !p.scanMantissa(8) {
+ // only scanned "0o"
+ return p.errorf("illegal octal number %q", p.src)
+ }
+ default:
+ // int (base 8 or 10) or float
+ p.scanMantissa(8)
+ if p.ch == '8' || p.ch == '9' {
+ // Digits 8/9 after a leading 0 are only legal if this
+ // turns out to be a float or an exponent form.
+ p.scanMantissa(10)
+ if p.ch != '.' && p.ch != 'e' && p.ch != 'E' {
+ return p.errorf("illegal integer number %q", p.src)
+ }
+ }
+ switch p.ch {
+ case 'e', 'E':
+ if len(p.buf) == 0 {
+ p.buf = append(p.buf, '0')
+ }
+ fallthrough
+ case '.':
+ goto fraction
+ }
+ // Leading-zero form with octal digits consumed above.
+ if len(p.buf) > 0 {
+ p.base = 8
+ }
+ }
+ goto exit
+ }
+
+ // decimal int or float
+ if !p.scanMantissa(10) {
+ return p.errorf("illegal number start %q", p.src)
+ }
+
+fraction:
+ if p.ch == '.' {
+ p.isFloat = true
+ p.next()
+ p.scanMantissa(10)
+ }
+
+exponent:
+ switch p.ch {
+ // Only K, M, G, T, P (and their binary "i" variants) are recognized
+ // as multiplier suffixes here; 'E' is treated as an exponent marker
+ // by the case below.
+ case 'K', 'M', 'G', 'T', 'P':
+ p.mul = charToMul[p.ch]
+ p.next()
+ if p.ch == 'i' {
+ p.mul |= mulBin
+ p.next()
+ } else {
+ p.mul |= mulDec
+ }
+ // A multiplied number must remain integral; decimal reports an
+ // error via RoundToIntegralExact otherwise.
+ var v apd.Decimal
+ p.isFloat = false
+ return p.decimal(&v)
+
+ case 'e', 'E':
+ p.isFloat = true
+ p.next()
+ p.buf = append(p.buf, 'e')
+ if p.ch == '-' || p.ch == '+' {
+ p.buf = append(p.buf, p.ch)
+ p.next()
+ }
+ if !p.scanMantissa(10) {
+ return p.errorf("illegal exponent %q", p.src)
+ }
+ }
+
+exit:
+ return nil
+}
+
+var charToMul = map[byte]Multiplier{
+ 'K': mul1,
+ 'M': mul2,
+ 'G': mul3,
+ 'T': mul4,
+ 'P': mul5,
+ 'E': mul6,
+ 'Z': mul7,
+ 'Y': mul8,
+}
+
+var mulToRat = map[Multiplier]*apd.Decimal{}
+
+// init precomputes the decimal (1000^i) and binary (1024^i) multiplier
+// values used to scale numbers carrying SI/IEC suffixes.
+func init() {
+ d := apd.New(1, 0)
+ b := apd.New(1, 0)
+ dm := apd.New(1000, 0)
+ bm := apd.New(1024, 0)
+
+ c := apd.BaseContext
+ // NOTE(review): len(charToMul) == 8 and the bound is exclusive, so i
+ // runs 1..7 and mul8 (Y/Yi) is never added to mulToRat. scanNumber
+ // only accepts K/M/G/T/P suffixes, so this may be unreachable —
+ // confirm against upstream before relying on Y/Yi.
+ for i := Multiplier(1); int(i) < len(charToMul); i++ {
+ // TODO: may we write to one of the sources?
+ var bn, dn apd.Decimal
+ _, _ = c.Mul(&dn, d, dm)
+ d = &dn
+ _, _ = c.Mul(&bn, b, bm)
+ b = &bn
+ mulToRat[mulDec|i] = d
+ mulToRat[mulBin|i] = b
+ }
+}
diff --git a/vendor/cuelang.org/go/cue/literal/quote.go b/vendor/cuelang.org/go/cue/literal/quote.go
new file mode 100644
index 0000000000..9cbe6e82f9
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/quote.go
@@ -0,0 +1,370 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import (
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Form defines how to quote a string or bytes literal.
+type Form struct {
+ hashCount int
+ quote byte
+ multiline bool
+ auto bool
+ exact bool
+ asciiOnly bool
+ graphicOnly bool
+ indent string
+ tripleQuote string
+}
+
+// TODO:
+// - Fixed or max level of escape modifiers (#""#).
+// - Option to fall back to bytes if value cannot be represented as string.
+// E.g. ExactString.
+// - QuoteExact that fails with an error if a string cannot be represented
+// without loss.
+// - Handle auto-breaking for long lines (Swift-style, \-terminated lines).
+// This is not supported yet in CUE, but may, and should be considered as
+// a possibility in API design.
+// - Other possible convenience forms: Blob (auto-break bytes), String (bytes
+// or string), Label.
+
+// WithTabIndent returns a new Form with indentation set to the given number
+// of tabs. The result will be a multiline string.
+func (f Form) WithTabIndent(n int) Form {
+ f.indent = tabs(n)
+ f.multiline = true
+ return f
+}
+
+const tabIndent = "\t\t\t\t\t\t\t\t\t\t\t\t"
+
+func tabs(n int) string {
+ if n < len(tabIndent) {
+ return tabIndent[:n]
+ }
+ return strings.Repeat("\t", n)
+}
+
+// WithOptionalTabIndent is like WithTabIndent, but a multiline string is
+// only produced if the value being quoted contains newline characters
+// (the auto flag is resolved in Append).
+func (f Form) WithOptionalTabIndent(tabs int) Form {
+ if tabs < len(tabIndent) {
+ f.indent = tabIndent[:tabs]
+ } else {
+ f.indent = strings.Repeat("\t", tabs)
+ }
+ f.auto = true
+ return f
+}
+
+// WithASCIIOnly ensures the quoted strings consists solely of valid ASCII
+// characters.
+func (f Form) WithASCIIOnly() Form {
+ f.asciiOnly = true
+ return f
+}
+
+// WithGraphicOnly ensures the quoted strings consists solely of printable
+// characters.
+func (f Form) WithGraphicOnly() Form {
+ f.graphicOnly = true
+ return f
+}
+
+var (
+ // String defines the format of a CUE string. Conversions may be lossy.
+ String Form = stringForm
+
+ // TODO: ExactString: quotes to bytes type if the string cannot be
+ // represented without loss of accuracy.
+
+ // Label is like Text, but optimized for labels.
+ Label Form = stringForm
+
+ // Bytes defines the format of bytes literal.
+ Bytes Form = bytesForm
+
+ stringForm = Form{
+ quote: '"',
+ tripleQuote: `"""`,
+ }
+ bytesForm = Form{
+ quote: '\'',
+ tripleQuote: `'''`,
+ exact: true,
+ }
+)
+
+// Quote returns CUE string literal representing s. The returned string uses CUE
+// escape sequences (\t, \n, \u00FF, \u0100) for control characters and
+// non-printable characters as defined by strconv.IsPrint.
+//
+// It reports an error if the string cannot be converted to the desired form.
+func (f Form) Quote(s string) string {
+ return string(f.Append(make([]byte, 0, 3*len(s)/2), s))
+}
+
+const (
+ lowerhex = "0123456789abcdef"
+)
+
+// Append appends a CUE string literal representing s, as generated by Quote, to
+// buf and returns the extended buffer.
+func (f Form) Append(buf []byte, s string) []byte {
+ if f.auto && strings.ContainsRune(s, '\n') {
+ f.multiline = true
+ }
+ if f.multiline {
+ f.hashCount = f.requiredHashCount(s)
+ }
+
+ // Often called with big strings, so preallocate. If there's quoting,
+ // this is conservative but still helps a lot.
+ if cap(buf)-len(buf) < len(s) {
+ nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1)
+ copy(nBuf, buf)
+ buf = nBuf
+ }
+ for i := 0; i < f.hashCount; i++ {
+ buf = append(buf, '#')
+ }
+ if f.multiline {
+ buf = append(buf, f.quote, f.quote, f.quote, '\n')
+ if s == "" {
+ buf = append(buf, f.indent...)
+ buf = append(buf, f.quote, f.quote, f.quote)
+ return buf
+ }
+ if len(s) > 0 && s[0] != '\n' {
+ buf = append(buf, f.indent...)
+ }
+ } else {
+ buf = append(buf, f.quote)
+ }
+
+ buf = f.appendEscaped(buf, s)
+
+ if f.multiline {
+ buf = append(buf, '\n')
+ buf = append(buf, f.indent...)
+ buf = append(buf, f.quote, f.quote, f.quote)
+ } else {
+ buf = append(buf, f.quote)
+ }
+ for i := 0; i < f.hashCount; i++ {
+ buf = append(buf, '#')
+ }
+
+ return buf
+}
+
+// AppendEscaped appends a CUE string literal representing s, as generated by
+// Quote but without the quotes, to buf and returns the extended buffer.
+//
+// It does not include the last indentation.
+func (f Form) AppendEscaped(buf []byte, s string) []byte {
+ if f.auto && strings.ContainsRune(s, '\n') {
+ f.multiline = true
+ }
+
+ // Often called with big strings, so preallocate. If there's quoting,
+ // this is conservative but still helps a lot.
+ if cap(buf)-len(buf) < len(s) {
+ nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1)
+ copy(nBuf, buf)
+ buf = nBuf
+ }
+
+ buf = f.appendEscaped(buf, s)
+
+ return buf
+}
+
+func (f Form) appendEscaped(buf []byte, s string) []byte {
+ for width := 0; len(s) > 0; s = s[width:] {
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if f.exact && width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if f.multiline && r == '\n' {
+ buf = append(buf, '\n')
+ if len(s) > 1 && s[1] != '\n' {
+ buf = append(buf, f.indent...)
+ }
+ continue
+ }
+ buf = f.appendEscapedRune(buf, r)
+ }
+ return buf
+}
+
+func (f *Form) appendEscapedRune(buf []byte, r rune) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ if (!f.multiline && r == rune(f.quote)) || r == '\\' { // always backslashed
+ buf = f.appendEscape(buf)
+ buf = append(buf, byte(r))
+ return buf
+ }
+ if f.asciiOnly {
+ if r < utf8.RuneSelf && strconv.IsPrint(r) {
+ buf = append(buf, byte(r))
+ return buf
+ }
+ } else if strconv.IsPrint(r) || f.graphicOnly && isInGraphicList(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ return buf
+ }
+ buf = f.appendEscape(buf)
+ switch r {
+ case '\a':
+ buf = append(buf, 'a')
+ case '\b':
+ buf = append(buf, 'b')
+ case '\f':
+ buf = append(buf, 'f')
+ case '\n':
+ buf = append(buf, 'n')
+ case '\r':
+ buf = append(buf, 'r')
+ case '\t':
+ buf = append(buf, 't')
+ case '\v':
+ buf = append(buf, 'v')
+ default:
+ switch {
+ case r < ' ' && f.exact:
+ buf = append(buf, 'x')
+ buf = append(buf, lowerhex[byte(r)>>4])
+ buf = append(buf, lowerhex[byte(r)&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, 'u')
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, 'U')
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ return buf
+}
+
+func (f *Form) appendEscape(buf []byte) []byte {
+ buf = append(buf, '\\')
+ for i := 0; i < f.hashCount; i++ {
+ buf = append(buf, '#')
+ }
+ return buf
+}
+
+// requiredHashCount returns the number of # characters
+// that are required to quote the multiline string s.
+func (f *Form) requiredHashCount(s string) int {
+ hashCount := 0
+ i := 0
+ // Find all occurrences of the triple-quote and count
+ // the maximum number of succeeding # characters.
+ for {
+ j := strings.Index(s[i:], f.tripleQuote)
+ if j == -1 {
+ break
+ }
+ i += j + 3
+ // Absorb all extra quotes, so we
+ // get to the end of the sequence.
+ for ; i < len(s); i++ {
+ if s[i] != f.quote {
+ break
+ }
+ }
+ e := i - 1
+ // Count succeeding # characters.
+ for ; i < len(s); i++ {
+ if s[i] != '#' {
+ break
+ }
+ }
+ // i-e is one more than the number of #s counted, so a bare
+ // triple-quote with no #s still forces hashCount >= 1.
+ if nhash := i - e; nhash > hashCount {
+ hashCount = nhash
+ }
+ }
+ return hashCount
+}
+
+// isInGraphicList reports whether the rune is in the isGraphic list. This separation
+// from IsGraphic allows quoteWith to avoid two calls to IsPrint.
+// Should be called only if IsPrint fails.
+func isInGraphicList(r rune) bool {
+ // We know r must fit in 16 bits - see makeisprint.go.
+ if r > 0xFFFF {
+ return false
+ }
+ rr := uint16(r)
+ i := bsearch16(isGraphic, rr)
+ return i < len(isGraphic) && rr == isGraphic[i]
+}
+
+// bsearch16 returns the smallest i such that a[i] >= x.
+// If there is no such i, bsearch16 returns len(a).
+func bsearch16(a []uint16, x uint16) int {
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2
+ if a[h] < x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i
+}
+
+// isGraphic lists the graphic runes not matched by IsPrint.
+var isGraphic = []uint16{
+ 0x00a0,
+ 0x1680,
+ 0x2000,
+ 0x2001,
+ 0x2002,
+ 0x2003,
+ 0x2004,
+ 0x2005,
+ 0x2006,
+ 0x2007,
+ 0x2008,
+ 0x2009,
+ 0x200a,
+ 0x202f,
+ 0x205f,
+ 0x3000,
+}
diff --git a/vendor/cuelang.org/go/cue/literal/string.go b/vendor/cuelang.org/go/cue/literal/string.go
new file mode 100644
index 0000000000..59fae0a60e
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/string.go
@@ -0,0 +1,421 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import (
+ "errors"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Sentinel errors returned by Unquote and ParseQuotes; callers may
+// compare against them directly.
+var (
+	errSyntax            = errors.New("invalid syntax")
+	errInvalidWhitespace = errors.New("invalid string: invalid whitespace")
+	errMissingNewline    = errors.New(
+		"invalid string: opening quote of multiline string must be followed by newline")
+	errUnmatchedQuote = errors.New("invalid string: unmatched quote")
+	// TODO: making this an error is optional according to RFC 4627. But we
+	// could make it not an error if this ever results in an issue.
+	errSurrogate = errors.New("unmatched surrogate pair")
+)
+
+// Unquote interprets s as a single- or double-quoted, single- or multi-line
+// string, possibly with custom escape delimiters, returning the string value
+// that s quotes.
+func Unquote(s string) (string, error) {
+	// s is passed as both start and end so the opening and closing
+	// quotes are validated against the same text.
+	info, nStart, _, err := ParseQuotes(s, s)
+	if err != nil {
+		return "", err
+	}
+	s = s[nStart:]
+	return info.Unquote(s)
+}
+
+// QuoteInfo describes the type of quotes used for a string.
+type QuoteInfo struct {
+	quote      string // full opening quote, including any leading #s
+	whitespace string // indentation prefix shared by the lines of a multiline string
+	numHash    int    // number of # characters surrounding the quotes
+	multiline  bool   // whether this is a triple-quoted multiline string
+	char       byte   // the quote character: '"' or '\''
+	numChar    byte   // number of quote characters: 1 or 3
+}
+
+// IsDouble reports whether the literal uses double quotes.
+// Double-quoted literals reject the byte-level \x and octal escapes
+// (see unquoteChar).
+func (q QuoteInfo) IsDouble() bool {
+	return q.char == '"'
+}
+
+// IsMulti reports whether a multi-line (triple-quoted) string was parsed.
+func (q QuoteInfo) IsMulti() bool {
+	return q.multiline
+}
+
+// Whitespace returns the prefix whitespace for multiline strings, i.e.
+// the indentation that Unquote strips from the start of each line.
+func (q QuoteInfo) Whitespace() string {
+	return q.whitespace
+}
+
+// ParseQuotes checks if the opening quotes in start match the ending quotes
+// in end and reports its type as q or an error if they do not match or are
+// invalid. nStart indicates the number of bytes used for the opening quote.
+func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) {
+	// Count leading # characters; they extend the escape delimiter.
+	for i, c := range start {
+		if c != '#' {
+			break
+		}
+		q.numHash = i + 1
+	}
+	// NOTE(review): s[0] is read without a length check, so an input
+	// consisting only of #s would panic — confirm callers guarantee a
+	// quote character follows.
+	s := start[q.numHash:]
+	switch s[0] {
+	case '"', '\'':
+		q.char = s[0]
+		if len(s) > 3 && s[1] == s[0] && s[2] == s[0] {
+			// Triple quote: must be followed by a newline (LF or CRLF).
+			switch s[3] {
+			case '\n':
+				q.quote = start[:3+q.numHash]
+			case '\r':
+				if len(s) > 4 && s[4] == '\n' {
+					q.quote = start[:4+q.numHash]
+					break
+				}
+				fallthrough
+			default:
+				return q, 0, 0, errMissingNewline
+			}
+			q.multiline = true
+			q.numChar = 3
+			nStart = len(q.quote) + 1 // add whitespace later
+		} else {
+			q.quote = start[:1+q.numHash]
+			q.numChar = 1
+			nStart = len(q.quote)
+		}
+	default:
+		return q, 0, 0, errSyntax
+	}
+	// The closing delimiter must mirror the opening one: compare the
+	// opening quote against the end of `end`, reversed.
+	quote := start[:int(q.numChar)+q.numHash]
+	for i := 0; i < len(quote); i++ {
+		if j := len(end) - i - 1; j < 0 || quote[i] != end[j] {
+			return q, 0, 0, errUnmatchedQuote
+		}
+	}
+	if q.multiline {
+		// Record the whitespace immediately preceding the closing quote;
+		// every line of the multiline string must start with it.
+		i := len(end) - len(quote)
+		for i > 0 {
+			r, size := utf8.DecodeLastRuneInString(end[:i])
+			if r == '\n' || !unicode.IsSpace(r) {
+				break
+			}
+			i -= size
+		}
+		q.whitespace = end[i : len(end)-len(quote)]
+
+		if len(start) > nStart && start[nStart] != '\n' {
+			if !strings.HasPrefix(start[nStart:], q.whitespace) {
+				return q, 0, 0, errInvalidWhitespace
+			}
+			nStart += len(q.whitespace)
+		}
+	}
+
+	return q, nStart, int(q.numChar) + q.numHash, nil
+}
+
+// Unquote unquotes the given string. It must be terminated with a quote or an
+// interpolation start. Escape sequences are expanded and surrogates
+// are replaced with the corresponding non-surrogate code points.
+func (q QuoteInfo) Unquote(s string) (string, error) {
+	if len(s) > 0 && !q.multiline {
+		if contains(s, '\n') || contains(s, '\r') {
+			return "", errSyntax
+		}
+
+		// Is it trivial? Avoid allocation.
+		if s[len(s)-1] == q.char && q.numHash == 0 {
+			if s := s[:len(s)-1]; isSimple(s, rune(q.char)) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	// stripNL records that the last byte appended was a newline that
+	// must be dropped if the string ends right after it.
+	stripNL := false
+	for len(s) > 0 {
+		switch s[0] {
+		case '\r':
+			// Carriage returns are dropped entirely.
+			s = s[1:]
+			continue
+		case '\n':
+			// Newlines are only valid in multiline strings; each line
+			// must then begin with the common whitespace prefix.
+			switch {
+			case !q.multiline:
+				fallthrough
+			default:
+				return "", errInvalidWhitespace
+			case strings.HasPrefix(s[1:], q.whitespace):
+				s = s[1+len(q.whitespace):]
+			case strings.HasPrefix(s[1:], "\n"):
+				s = s[1:]
+			}
+			stripNL = true
+			buf = append(buf, '\n')
+			continue
+		}
+		c, multibyte, ss, err := unquoteChar(s, q)
+		// Combine a \u escape surrogate pair into one code point.
+		// NOTE(review): c is inspected before err is checked; this is
+		// safe only because unquoteChar leaves value == 0 on its error
+		// paths — confirm that invariant holds.
+		if surHigh <= c && c < surEnd {
+			if c >= surLow {
+				return "", errSurrogate
+			}
+			var cl rune
+			cl, _, ss, err = unquoteChar(ss, q)
+			if cl < surLow || surEnd <= cl {
+				return "", errSurrogate
+			}
+			c = 0x10000 + (c-surHigh)*0x400 + (cl - surLow)
+		}
+
+		if err != nil {
+			return "", err
+		}
+
+		s = ss
+		if c < 0 {
+			// c == -1: closing quote; c == -2: interpolation start \(.
+			if c == -2 {
+				stripNL = false
+			}
+			if stripNL {
+				// Strip the last newline, but only if it came from a closing
+				// quote.
+				buf = buf[:len(buf)-1]
+			}
+			return string(buf), nil
+		}
+		stripNL = false
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+	}
+	// allow unmatched quotes if already checked.
+	return "", errUnmatchedQuote
+}
+
+// Unicode surrogate ranges: [surHigh, surLow) are high surrogates and
+// [surLow, surEnd) are low surrogates; values in [surHigh, surEnd) are
+// not valid code points on their own.
+const (
+	surHigh = 0xD800
+	surLow  = 0xDC00
+	surEnd  = 0xE000
+)
+
+// isSimple reports whether s can be used verbatim: it contains no
+// quote character, no backslash, and no surrogate code points.
+func isSimple(s string, quote rune) bool {
+	// TODO(perf): check if using a simple DFA to detect surrogate pairs is
+	// faster than converting to code points. At the very least there should
+	// be an ASCII fast path.
+	for _, r := range s {
+		if r == quote || r == '\\' {
+			return false
+		}
+		if surHigh <= r && r < surEnd {
+			return false
+		}
+	}
+	return true
+}
+
+// contains reports whether the string contains the byte c.
+// (Equivalent to strings.IndexByte(s, c) >= 0.)
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+// unquoteChar decodes the first character or byte in the escaped string.
+// It returns four values:
+//
+//  1. value, the decoded Unicode code point or byte value; the special value
+//     of -1 indicates terminated by quotes and -2 means terminated by \(.
+//  2. multibyte, a boolean indicating whether the decoded character requires
+//     a multibyte UTF-8 representation;
+//  3. tail, the remainder of the string after the character; and
+//  4. an error that will be nil if the character is syntactically valid.
+//
+// The second argument, info, describes the quoting in effect: it determines
+// which escapes are permitted (double-quoted strings reject the byte-level
+// \x and octal escapes) and how many # characters must follow a backslash
+// for it to start an escape sequence.
+func unquoteChar(s string, info QuoteInfo) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == info.char && info.char != 0:
+		// Potentially the closing delimiter: numChar quote characters
+		// followed by numHash #s, ending the string.
+		for i := 1; byte(i) < info.numChar; i++ {
+			if i >= len(s) || s[i] != info.char {
+				return rune(info.char), false, s[1:], nil
+			}
+		}
+		for i := 0; i < info.numHash; i++ {
+			if i+int(info.numChar) >= len(s) || s[i+int(info.numChar)] != '#' {
+				return rune(info.char), false, s[1:], nil
+			}
+		}
+		if ln := int(info.numChar) + info.numHash; len(s) != ln {
+			// TODO: terminating quote in middle of string
+			return 0, false, s[ln:], errSyntax
+		}
+		return -1, false, "", nil
+	case c >= utf8.RuneSelf:
+		// TODO: consider handling surrogate values. These are discarded by
+		// DecodeRuneInString. It is technically correct to disallow it, but
+		// some JSON parsers allow this anyway.
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// A backslash not followed by numHash #s is a literal backslash.
+	if len(s) <= 1+info.numHash {
+		return '\\', false, s[1:], nil
+	}
+	for i := 1; i <= info.numHash && i < len(s); i++ {
+		if s[i] != '#' {
+			return '\\', false, s[1:], nil
+		}
+	}
+
+	c := s[1+info.numHash]
+	s = s[2+info.numHash:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case '/':
+		value = '/'
+	case 'x', 'u', 'U':
+		// Fixed-width hexadecimal escapes: \xHH, \uHHHH, \UHHHHHHHH.
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = errSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = errSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			if info.char == '"' {
+				err = errSyntax
+				return
+			}
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = errSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// Octal byte escape; only valid in single-quoted (byte) strings.
+		if info.char == '"' {
+			err = errSyntax
+			return
+		}
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = errSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = errSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = errSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"':
+		// TODO: should we allow escaping of quotes regardless?
+		if c != info.char {
+			err = errSyntax
+			return
+		}
+		value = rune(c)
+	case '(':
+		// Interpolation start: only valid at the very end of the segment.
+		if s != "" {
+			// TODO: terminating quote in middle of string
+			return 0, false, s, errSyntax
+		}
+		value = -2
+	default:
+		err = errSyntax
+		return
+	}
+	tail = s
+	return
+}
+
+// unhex returns the value of the hexadecimal digit b. For non-hex
+// input it returns ok == false (and v == 0).
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go
new file mode 100644
index 0000000000..43ca6d4639
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/marshal.go
@@ -0,0 +1,221 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/gob"
+ "path/filepath"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/format"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/export"
+)
+
+// instanceData is the serialized form of a build instance. Root marks
+// instances passed to Marshal directly, as opposed to those pulled in
+// as imports.
+type instanceData struct {
+	Root  bool
+	Path  string
+	Files []fileData
+}
+
+// fileData holds one formatted source file of a serialized instance.
+type fileData struct {
+	Name string
+	Data []byte
+}
+
+// version is the marshal format version; it is written as the first
+// byte of the output and checked by Unmarshal.
+const version = 1
+
+// unmarshaller rebuilds build.Instances from decoded instanceData,
+// resolving imports through the imports map.
+type unmarshaller struct {
+	ctxt    *build.Context
+	imports map[string]*instanceData // importable packages, keyed by path
+}
+
+// load resolves an imported package by path. It returns nil when the
+// path was not included in the marshaled data.
+func (b *unmarshaller) load(pos token.Pos, path string) *build.Instance {
+	bi := b.imports[path]
+	if bi == nil {
+		return nil
+	}
+	return b.build(bi)
+}
+
+// build reconstructs a build.Instance from its serialized form.
+func (b *unmarshaller) build(bi *instanceData) *build.Instance {
+	p := b.ctxt.NewInstance(bi.Path, b.load)
+	p.ImportPath = bi.Path
+	for _, f := range bi.Files {
+		// NOTE(review): AddFile errors are discarded here — presumably
+		// they resurface through the instance on Complete/compile;
+		// confirm.
+		_ = p.AddFile(f.Name, f.Data)
+	}
+	p.Complete()
+	return p
+}
+
+// compileInstances rebuilds and compiles the decoded instances in r.
+// Only Root instances are compiled and returned; the others are made
+// available for import resolution.
+func compileInstances(r *Runtime, data []*instanceData) (instances []*Instance, err error) {
+	b := unmarshaller{
+		ctxt:    build.NewContext(),
+		imports: map[string]*instanceData{},
+	}
+	// Index importable packages by path. Only a root instance may have
+	// an empty path.
+	for _, i := range data {
+		if i.Path == "" {
+			if !i.Root {
+				return nil, errors.Newf(token.NoPos,
+					"data contains non-root package without import path")
+			}
+			continue
+		}
+		b.imports[i.Path] = i
+	}
+
+	builds := []*build.Instance{}
+	for _, i := range data {
+		if !i.Root {
+			continue
+		}
+		builds = append(builds, b.build(i))
+	}
+
+	return r.build(builds)
+}
+
+// Unmarshal creates an Instance from bytes generated by the MarshalBinary
+// method of an instance.
+//
+// The expected layout is a single version byte followed by a
+// gzip-compressed gob stream of []*instanceData (see Marshal).
+func (r *Runtime) Unmarshal(b []byte) ([]*Instance, error) {
+	if len(b) == 0 {
+		return nil, errors.Newf(token.NoPos, "unmarshal failed: empty buffer")
+	}
+
+	switch b[0] {
+	case version:
+	default:
+		return nil, errors.Newf(token.NoPos,
+			"unmarshal failed: unsupported version %d, regenerate data", b[0])
+	}
+
+	reader, err := gzip.NewReader(bytes.NewReader(b[1:]))
+	if err != nil {
+		return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err)
+	}
+
+	data := []*instanceData{}
+	err = gob.NewDecoder(reader).Decode(&data)
+	if err != nil {
+		return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err)
+	}
+
+	return compileInstances(r, data)
+}
+
+// Marshal creates bytes from a group of instances. Imported instances will
+// be included in the emission.
+//
+// The stored instances are functionally the same, but preserving of file
+// information is only done on a best-effort basis.
+//
+// The output is a version byte followed by a gzip-compressed gob stream
+// of []instanceData, the inverse of Unmarshal.
+func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
+	staged := []instanceData{}
+	done := map[string]int{}
+
+	var errs errors.Error
+
+	// stageInstance appends i — and, recursively, its non-builtin
+	// imports — to staged and returns its index.
+	// NOTE(review): done is read but never written, so an instance
+	// imported from several places is re-staged each time, and an
+	// import cycle would recurse forever — confirm cycles cannot occur.
+	// NOTE(review): errs accumulates format errors but is never
+	// returned from Marshal — verify this is intentional.
+	var stageInstance func(i *Instance) (pos int)
+	stageInstance = func(i *Instance) (pos int) {
+		if p, ok := done[i.ImportPath]; ok {
+			return p
+		}
+		// TODO: support exporting instance
+		file, _ := export.Def(r.runtime(), i.inst.ID(), i.root)
+		imports := []string{}
+		file.VisitImports(func(i *ast.ImportDecl) {
+			for _, spec := range i.Specs {
+				info, _ := astutil.ParseImportSpec(spec)
+				imports = append(imports, info.ID)
+			}
+		})
+
+		// Ensure the exported file carries the instance's package clause.
+		if i.PkgName != "" {
+			p, name, _ := internal.PackageInfo(file)
+			if p == nil {
+				pkg := &ast.Package{Name: ast.NewIdent(i.PkgName)}
+				file.Decls = append([]ast.Decl{pkg}, file.Decls...)
+			} else if name != i.PkgName {
+				// p is guaranteed to be generated by Def, so it is "safe" to
+				// modify.
+				p.Name = ast.NewIdent(i.PkgName)
+			}
+		}
+
+		b, err := format.Node(file)
+		errs = errors.Append(errs, errors.Promote(err, "marshal"))
+
+		// Best-effort file name: use the original name, made relative to
+		// the module root, when the instance has exactly one file.
+		filename := "unmarshal"
+		if i.inst != nil && len(i.inst.Files) == 1 {
+			filename = i.inst.Files[0].Filename
+
+			dir := i.Dir
+			if i.inst != nil && i.inst.Root != "" {
+				dir = i.inst.Root
+			}
+			if dir != "" {
+				filename = filepath.FromSlash(filename)
+				filename, _ = filepath.Rel(dir, filename)
+				filename = filepath.ToSlash(filename)
+			}
+		}
+		// TODO: this should probably be changed upstream, but as the path
+		// is for reference purposes only, this is safe.
+		importPath := filepath.ToSlash(i.ImportPath)
+
+		staged = append(staged, instanceData{
+			Path:  importPath,
+			Files: []fileData{{filename, b}},
+		})
+
+		p := len(staged) - 1
+
+		for _, imp := range imports {
+			i := getImportFromPath(r.runtime(), imp)
+			if i == nil || !strings.Contains(imp, ".") {
+				continue // a builtin package.
+			}
+			stageInstance(i)
+		}
+
+		return p
+	}
+
+	for _, i := range instances {
+		staged[stageInstance(i)].Root = true
+	}
+
+	buf := &bytes.Buffer{}
+	buf.WriteByte(version)
+
+	zw := gzip.NewWriter(buf)
+	if err := gob.NewEncoder(zw).Encode(staged); err != nil {
+		return nil, err
+	}
+
+	if err := zw.Close(); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+
+}
diff --git a/vendor/cuelang.org/go/cue/op.go b/vendor/cuelang.org/go/cue/op.go
new file mode 100644
index 0000000000..22b31a5d12
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/op.go
@@ -0,0 +1,182 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// Op indicates the operation at the top of an expression tree of the expression
+// use to evaluate a value.
+type Op = adt.Op
+
+// Values of Op. These re-export the corresponding adt operators under
+// the public cue API.
+const (
+	NoOp Op = adt.NoOp
+
+	AndOp Op = adt.AndOp
+	OrOp  Op = adt.OrOp
+
+	SelectorOp Op = adt.SelectorOp
+	IndexOp    Op = adt.IndexOp
+	SliceOp    Op = adt.SliceOp
+	CallOp     Op = adt.CallOp
+
+	BooleanAndOp Op = adt.BoolAndOp
+	BooleanOrOp  Op = adt.BoolOrOp
+
+	EqualOp            Op = adt.EqualOp
+	NotOp              Op = adt.NotOp
+	NotEqualOp         Op = adt.NotEqualOp
+	LessThanOp         Op = adt.LessThanOp
+	LessThanEqualOp    Op = adt.LessEqualOp
+	GreaterThanOp      Op = adt.GreaterThanOp
+	GreaterThanEqualOp Op = adt.GreaterEqualOp
+
+	RegexMatchOp    Op = adt.MatchOp
+	NotRegexMatchOp Op = adt.NotMatchOp
+
+	AddOp           Op = adt.AddOp
+	SubtractOp      Op = adt.SubtractOp
+	MultiplyOp      Op = adt.MultiplyOp
+	FloatQuotientOp Op = adt.FloatQuotientOp
+	IntQuotientOp   Op = adt.IntQuotientOp
+	IntRemainderOp  Op = adt.IntRemainderOp
+	IntDivideOp     Op = adt.IntDivideOp
+	IntModuloOp     Op = adt.IntModuloOp
+
+	InterpolationOp Op = adt.InterpolationOp
+)
+
+// isCmp reports whether an op is a comparator. It relies on the
+// comparison operators being declared contiguously from opEql through
+// opGeq in the op const block.
+func (op op) isCmp() bool {
+	return opEql <= op && op <= opGeq
+}
+
+// unifyType reports whether op is a unification operator (ok) and, if
+// so, whether it is the unchecked (embedding) variant.
+func (op op) unifyType() (unchecked, ok bool) {
+	if op == opUnifyUnchecked {
+		return true, true
+	}
+	return false, op == opUnify
+}
+
+// op is the internal operator code. The declaration order is
+// significant: isCmp depends on opEql..opGeq being contiguous.
+type op uint16
+
+const (
+	opUnknown op = iota
+
+	opUnify
+	opUnifyUnchecked
+	opDisjunction
+
+	opLand
+	opLor
+	opNot
+
+	opEql
+	opNeq
+	opMat
+	opNMat
+
+	opLss
+	opGtr
+	opLeq
+	opGeq
+
+	opAdd
+	opSub
+	opMul
+	opQuo
+	opRem
+
+	opIDiv
+	opIMod
+	opIQuo
+	opIRem
+)
+
+// opStrings maps each op to its CUE source representation.
+// NOTE(review): opRem has no entry and therefore stringifies as "";
+// confirm it can never reach String().
+var opStrings = []string{
+	opUnknown: "??",
+
+	opUnify: "&",
+	// opUnifyUnchecked is internal only. Syntactically this is
+	// represented as embedding.
+	opUnifyUnchecked: "&!",
+	opDisjunction:    "|",
+
+	opLand: "&&",
+	opLor:  "||",
+	opNot:  "!",
+
+	opEql:  "==",
+	opNeq:  "!=",
+	opMat:  "=~",
+	opNMat: "!~",
+
+	opLss: "<",
+	opGtr: ">",
+	opLeq: "<=",
+	opGeq: ">=",
+
+	opAdd: "+",
+	opSub: "-",
+	opMul: "*",
+	opQuo: "/",
+
+	opIDiv: "div",
+	opIMod: "mod",
+	opIQuo: "quo",
+	opIRem: "rem",
+}
+
+// String returns the CUE source representation of the operator.
+func (op op) String() string { return opStrings[op] }
+
+// tokenMap translates scanner tokens to their internal operator codes.
+var tokenMap = map[token.Token]op{
+	token.OR:  opDisjunction, // |
+	token.AND: opUnify,       // &
+
+	token.ADD: opAdd, // +
+	token.SUB: opSub, // -
+	token.MUL: opMul, // *
+	token.QUO: opQuo, // /
+
+	token.IDIV: opIDiv, // div
+	token.IMOD: opIMod, // mod
+	token.IQUO: opIQuo, // quo
+	token.IREM: opIRem, // rem
+
+	token.LAND: opLand, // &&
+	token.LOR:  opLor,  // ||
+
+	token.EQL: opEql, // ==
+	token.LSS: opLss, // <
+	token.GTR: opGtr, // >
+	token.NOT: opNot, // !
+
+	token.NEQ:  opNeq,  // !=
+	token.LEQ:  opLeq,  // <=
+	token.GEQ:  opGeq,  // >=
+	token.MAT:  opMat,  // =~
+	token.NMAT: opNMat, // !~
+}
+
+// opMap is the inverse of tokenMap, populated at init time.
+var opMap = map[op]token.Token{}
+
+func init() {
+	for t, o := range tokenMap {
+		opMap[o] = t
+	}
+}
diff --git a/vendor/cuelang.org/go/cue/parser/doc.go b/vendor/cuelang.org/go/cue/parser/doc.go
new file mode 100644
index 0000000000..adde13989b
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/parser/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser implements a parser for CUE source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the output
+// is an abstract syntax tree (AST) representing the CUE source. The parser is
+// invoked through one of the Parse* functions.
+//
+// The parser accepts a larger language than is syntactically permitted by the
+// CUE spec, for simplicity, and for improved robustness in the presence of
+// syntax errors.
+package parser // import "cuelang.org/go/cue/parser"
diff --git a/vendor/cuelang.org/go/cue/parser/fuzz.go b/vendor/cuelang.org/go/cue/parser/fuzz.go
new file mode 100644
index 0000000000..21a1d087d6
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/parser/fuzz.go
@@ -0,0 +1,26 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+// +build gofuzz
+
+package parser
+
+// Fuzz is the go-fuzz entry point. It reports 1 for inputs that parse
+// successfully so the fuzzer favors them when growing its corpus.
+func Fuzz(b []byte) int {
+	_, err := ParseFile("go-fuzz", b)
+	if err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go
new file mode 100644
index 0000000000..8695a6c34a
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/parser/interface.go
@@ -0,0 +1,232 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains the exported entry points for invoking the parser.
+
+package parser
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/source"
+)
+
+// Option specifies a parse option.
+type Option func(p *parser)
+
+var (
+	// PackageClauseOnly causes parsing to stop after the package clause.
+	PackageClauseOnly Option = packageClauseOnly
+	packageClauseOnly        = func(p *parser) {
+		p.mode |= packageClauseOnlyMode
+	}
+
+	// ImportsOnly causes parsing to stop parsing after the import declarations.
+	ImportsOnly Option = importsOnly
+	importsOnly        = func(p *parser) {
+		p.mode |= importsOnlyMode
+	}
+
+	// ParseComments causes comments to be parsed.
+	ParseComments Option = parseComments
+	parseComments        = func(p *parser) {
+		p.mode |= parseCommentsMode
+	}
+
+	// Trace causes parsing to print a trace of parsed productions.
+	Trace    Option = traceOpt
+	traceOpt        = func(p *parser) {
+		p.mode |= traceMode
+	}
+
+	// DeclarationErrors causes parsing to report declaration errors.
+	DeclarationErrors Option = declarationErrors
+	declarationErrors        = func(p *parser) {
+		p.mode |= declarationErrorsMode
+	}
+
+	// AllErrors causes all errors to be reported (not just the first 10 on different lines).
+	AllErrors Option = allErrors
+	allErrors        = func(p *parser) {
+		p.mode |= allErrorsMode
+	}
+
+	// AllowPartial allows the parser to be used on a prefix buffer;
+	// the parser then does not require EOF at the end of the input.
+	AllowPartial Option = allowPartial
+	allowPartial        = func(p *parser) {
+		p.mode |= partialMode
+	}
+)
+
+// FromVersion specifies until which legacy version the parser should provide
+// backwards compatibility.
+func FromVersion(version int) Option {
+	// Shift non-negative versions by one — presumably so the zero
+	// value of parser.version stays distinguishable from an explicit
+	// v1.0.x; TODO confirm.
+	if version >= 0 {
+		version++
+	}
+	// Versions:
+	// <0: major version 0 (counting -1000 + x, where x = 100*m+p in 0.m.p
+	// >=0: x+1 in 1.x.y
+	return func(p *parser) { p.version = version }
+}
+
+// version0 encodes a legacy 0.minor.patch version in the negative
+// scheme described in FromVersion.
+func version0(minor, patch int) int {
+	return -1000 + 100*minor + patch
+}
+
+// DeprecationError is a sentinel error to indicate that an error is
+// related to an unsupported old CUE syntax.
+type DeprecationError struct {
+	Version int
+}
+
+// Error implements the error interface.
+// NOTE(review): the message does not mention e.Version; consider
+// surfacing it to callers.
+func (e *DeprecationError) Error() string {
+	return "try running `cue fix` (possibly with an earlier version, like v0.2.2) to upgrade"
+}
+
+// Latest specifies the latest version of the parser, effectively setting
+// the strictest implementation.
+const Latest = latest
+
+// latest is v0.4.0 in the encoding produced by version0.
+const latest = -600
+
+// FileOffset specifies the File position info to use.
+func FileOffset(pos int) Option {
+	return func(p *parser) { p.offset = pos }
+}
+
+// A mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+type mode uint
+
+const (
+	packageClauseOnlyMode mode = 1 << iota // stop parsing after package clause
+	importsOnlyMode                        // stop parsing after import declarations
+	parseCommentsMode                      // parse comments and add them to AST
+	partialMode                            // allow parsing a prefix buffer; EOF not required
+	traceMode                              // print a trace of parsed productions
+	declarationErrorsMode                  // report declaration errors
+	allErrorsMode                          // report all errors (not just the first 10 on different lines)
+)
+
+// ParseFile parses the source code of a single CUE source file and returns
+// the corresponding File node. The source code may be provided via
+// the filename of the source file, or via the src parameter.
+//
+// If src != nil, ParseFile parses the source from src and the filename is
+// only used when recording position information. The type of the argument
+// for the src parameter must be string, []byte, or io.Reader.
+// If src == nil, ParseFile parses the file specified by filename.
+//
+// The mode parameter controls the amount of source text parsed and other
+// optional parser functionality.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via a ErrorList which is sorted by file position.
+func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, err error) {
+
+	// get source
+	text, err := source.Read(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var pp parser
+	defer func() {
+		// Only swallow panics the parser raised itself when bailing
+		// out (pp.panicking); genuine bugs still propagate.
+		if pp.panicking {
+			_ = recover()
+		}
+
+		// set result values
+		if f == nil {
+			// source is not a valid CUE source file - satisfy
+			// ParseFile API and return a valid (but) empty
+			// *File
+			f = &ast.File{
+				// Scope: NewScope(nil),
+			}
+		}
+
+		err = errors.Sanitize(pp.errors)
+	}()
+
+	// parse source
+	pp.init(filename, text, mode)
+	f = pp.parseFile()
+	if f == nil {
+		return nil, pp.errors
+	}
+	f.Filename = filename
+	astutil.Resolve(f, pp.errf)
+
+	return f, pp.errors
+}
+
+// ParseExpr is a convenience function for parsing an expression.
+// The arguments have the same meaning as for ParseFile, but the source
+// must be a valid CUE (type or value) expression.
+func ParseExpr(filename string, src interface{}, mode ...Option) (ast.Expr, error) {
+	// get source
+	text, err := source.Read(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var p parser
+	defer func() {
+		// Only swallow panics the parser raised itself when bailing out.
+		if p.panicking {
+			_ = recover()
+		}
+		err = errors.Sanitize(p.errors)
+	}()
+
+	// parse expr
+	p.init(filename, text, mode)
+	// Set up pkg-level scopes to avoid nil-pointer errors.
+	// This is not needed for a correct expression x as the
+	// parser will be ok with a nil topScope, but be cautious
+	// in case of an erroneous x.
+	e := p.parseRHS()
+
+	// If a comma was inserted, consume it;
+	// report an error if there's more tokens.
+	if p.tok == token.COMMA && p.lit == "\n" {
+		p.next()
+	}
+	// With AllowPartial, trailing input after the expression is accepted.
+	if p.mode&partialMode == 0 {
+		p.expect(token.EOF)
+	}
+
+	if p.errors != nil {
+		return nil, p.errors
+	}
+	astutil.ResolveExpr(e, p.errf)
+
+	return e, p.errors
+}
+
+// parseExprString is a convenience function for obtaining the AST of an
+// expression x given as a plain string. The position information recorded
+// in the AST is undefined. The filename used in error messages is the
+// empty string.
+func parseExprString(x string) (ast.Expr, error) {
+	return ParseExpr("", []byte(x))
+}
diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go
new file mode 100644
index 0000000000..e91b3014e8
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/parser/parser.go
@@ -0,0 +1,1669 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/scanner"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/astinternal"
+)
+
+var debugStr = astinternal.DebugStr
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+ file *token.File
+ offset int
+ errors errors.Error
+ scanner scanner.Scanner
+
+ // Tracing/debugging
+ mode mode // parsing mode
+ trace bool // == (mode & Trace != 0)
+ panicking bool // set if we are bailing out due to too many errors.
+ indent int // indentation used for tracing output
+
+ // Comments
+ leadComment *ast.CommentGroup
+ comments *commentState
+
+ // Next token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+
+ // Error recovery
+ // (used to limit the number of calls to syncXXX functions
+ // w/o making scanning progress - avoids potential endless
+ // loops across multiple parser functions during error recovery)
+ syncPos token.Pos // last synchronization position
+ syncCnt int // number of calls to syncXXX without progress
+
+ // Non-syntactic parser control
+ exprLev int // < 0: in control clause, >= 0: in expression
+
+ imports []*ast.ImportSpec // list of imports
+
+ version int
+}
+
+func (p *parser) init(filename string, src []byte, mode []Option) {
+ p.offset = -1
+ for _, f := range mode {
+ f(p)
+ }
+ p.file = token.NewFile(filename, p.offset, len(src))
+
+ var m scanner.Mode
+ if p.mode&parseCommentsMode != 0 {
+ m = scanner.ScanComments
+ }
+ eh := func(pos token.Pos, msg string, args []interface{}) {
+ p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...))
+ }
+ p.scanner.Init(p.file, src, eh, m)
+
+ p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)
+
+ p.comments = &commentState{pos: -1}
+
+ p.next()
+}
+
+type commentState struct {
+ parent *commentState
+ pos int8
+ groups []*ast.CommentGroup
+
+ // lists are not attached to nodes themselves. Enclosed expressions may
+ // miss a comment due to commas and line termination. closeLists ensures
+ // that comments will be passed to someone.
+ isList int
+ lastChild ast.Node
+ lastPos int8
+}
+
+// openComments reserves the next doc comment for the caller and flushes
+func (p *parser) openComments() *commentState {
+ child := &commentState{
+ parent: p.comments,
+ }
+ if c := p.comments; c != nil && c.isList > 0 {
+ if c.lastChild != nil {
+ var groups []*ast.CommentGroup
+ for _, cg := range c.groups {
+ if cg.Position == 0 {
+ groups = append(groups, cg)
+ }
+ }
+ groups = append(groups, c.lastChild.Comments()...)
+ for _, cg := range c.groups {
+ if cg.Position != 0 {
+ cg.Position = c.lastPos
+ groups = append(groups, cg)
+ }
+ }
+ ast.SetComments(c.lastChild, groups)
+ c.groups = nil
+ } else {
+ c.lastChild = nil
+ // attach before next
+ for _, cg := range c.groups {
+ cg.Position = 0
+ }
+ child.groups = c.groups
+ c.groups = nil
+ }
+ }
+ if p.leadComment != nil {
+ child.groups = append(child.groups, p.leadComment)
+ p.leadComment = nil
+ }
+ p.comments = child
+ return child
+}
+
+// openList is used to treat a list of comments as a single comment
+// position in a production.
+func (p *parser) openList() {
+ if p.comments.isList > 0 {
+ p.comments.isList++
+ return
+ }
+ c := &commentState{
+ parent: p.comments,
+ isList: 1,
+ }
+ p.comments = c
+}
+
+func (c *commentState) add(g *ast.CommentGroup) {
+ g.Position = c.pos
+ c.groups = append(c.groups, g)
+}
+
+func (p *parser) closeList() {
+ c := p.comments
+ if c.lastChild != nil {
+ for _, cg := range c.groups {
+ cg.Position = c.lastPos
+ c.lastChild.AddComment(cg)
+ }
+ c.groups = nil
+ }
+ switch c.isList--; {
+ case c.isList < 0:
+ if !p.panicking {
+ err := errors.Newf(p.pos, "unmatched close list")
+ p.errors = errors.Append(p.errors, err)
+ p.panicking = true
+ panic(err)
+ }
+ case c.isList == 0:
+ parent := c.parent
+ if len(c.groups) > 0 {
+ parent.groups = append(parent.groups, c.groups...)
+ }
+ parent.pos++
+ p.comments = parent
+ }
+}
+
+func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
+ if p.comments != c {
+ if !p.panicking {
+ err := errors.Newf(p.pos, "unmatched comments")
+ p.errors = errors.Append(p.errors, err)
+ p.panicking = true
+ panic(err)
+ }
+ return n
+ }
+ p.comments = c.parent
+ if c.parent != nil {
+ c.parent.lastChild = n
+ c.parent.lastPos = c.pos
+ c.parent.pos++
+ }
+ for _, cg := range c.groups {
+ if n != nil {
+ if cg != nil {
+ n.AddComment(cg)
+ }
+ }
+ }
+ c.groups = nil
+ return n
+}
+
+func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
+ c.closeNode(p, n)
+ return n
+}
+
+func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
+ c.closeNode(p, n)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+// (it is ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ p.assertV0(p.pos, 0, 10, "block quotes")
+
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Slash: p.pos, Text: p.lit}
+ p.next0()
+
+ return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. A non-comment token or n
+// empty lines terminate a comment group.
+func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ var rel token.RelPos
+ endline = p.file.Line(p.pos)
+ switch endline - prevLine {
+ case 0:
+ rel = token.Blank
+ case 1:
+ rel = token.Newline
+ default:
+ rel = token.NewSection
+ }
+ for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ cg := &ast.CommentGroup{List: list}
+ ast.SetRelPos(cg, rel)
+ comments = cg
+ return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+func (p *parser) next() {
+ // A leadComment may not be consumed if it leads an inner token of a node.
+ if p.leadComment != nil {
+ p.comments.add(p.leadComment)
+ }
+ p.leadComment = nil
+ prev := p.pos
+ p.next0()
+ p.comments.pos++
+
+ if p.tok == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ currentLine := p.file.Line(p.pos)
+ prevLine := p.file.Line(prev)
+ if prevLine == currentLine {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(prevLine, 0)
+ if p.file.Line(p.pos) != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ comment.Line = true
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok == token.COMMENT {
+ if comment != nil {
+ p.comments.add(comment)
+ }
+ comment, endline = p.consumeCommentGroup(prevLine, 1)
+ prevLine = currentLine
+ currentLine = p.file.Line(p.pos)
+
+ }
+
+ if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ comment.Doc = true
+ p.leadComment = comment
+ } else {
+ p.comments.add(comment)
+ }
+ }
+}
+
+// assertV0 indicates the last version at which a certain feature was
+// supported.
+func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) {
+ v := version0(minor, patch)
+ if p.version != 0 && p.version > v {
+ p.errors = errors.Append(p.errors,
+ errors.Wrapf(&DeprecationError{v}, pos,
+ "use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1))
+ }
+}
+
+func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) {
+ // ePos := p.file.Position(pos)
+ ePos := pos
+
+ // If AllErrors is not set, discard errors reported on the same line
+ // as the last recorded error and stop parsing if there are more than
+ // 10 errors.
+ if p.mode&allErrorsMode == 0 {
+ errors := errors.Errors(p.errors)
+ n := len(errors)
+ if n > 0 && errors[n-1].Position().Line() == ePos.Line() {
+ return // discard - likely a spurious error
+ }
+ if n > 10 {
+ p.panicking = true
+ panic("too many errors")
+ }
+ }
+
+ p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...))
+}
+
+func (p *parser) errorExpected(pos token.Pos, obj string) {
+ if pos != p.pos {
+ p.errf(pos, "expected %s", obj)
+ return
+ }
+ // the error happened at the current position;
+ // make the error message more specific
+ if p.tok == token.COMMA && p.lit == "\n" {
+ p.errf(pos, "expected %s, found newline", obj)
+ return
+ }
+
+ if p.tok.IsLiteral() {
+ p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit)
+ } else {
+ p.errf(pos, "expected %s, found '%s'", obj, p.tok)
+ }
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress
+ return pos
+}
+
+// expectClosing is like expect but provides a better error message
+// for the common case of a missing comma before a newline.
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
+ if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
+ p.errf(p.pos, "missing ',' before newline in %s", context)
+ p.next()
+ }
+ return p.expect(tok)
+}
+
+func (p *parser) expectComma() {
+ // semicolon is optional before a closing ')', ']', '}', or newline
+ if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
+ switch p.tok {
+ case token.COMMA:
+ p.next()
+ default:
+ p.errorExpected(p.pos, "','")
+ syncExpr(p)
+ }
+ }
+}
+
+func (p *parser) atComma(context string, follow ...token.Token) bool {
+ if p.tok == token.COMMA {
+ return true
+ }
+ for _, t := range follow {
+ if p.tok == t {
+ return false
+ }
+ }
+ // TODO: find a way to detect crossing lines now we don't have a semi.
+ if p.lit == "\n" {
+ p.errf(p.pos, "missing ',' before newline")
+ } else {
+ p.errf(p.pos, "missing ',' in %s", context)
+ }
+ return true // "insert" comma and continue
+}
+
+// syncExpr advances to the next field in a field list.
+// Used for synchronization after an error.
+func syncExpr(p *parser) {
+ for {
+ switch p.tok {
+ case token.COMMA:
+ // Return only if parser made some progress since last
+ // sync or if it has not reached 10 sync calls without
+ // progress. Otherwise consume at least one token to
+ // avoid an endless parser loop (it is possible that
+ // both parseOperand and parseStmt call syncStmt and
+ // correctly do not advance, thus the need for the
+ // invocation limit p.syncCnt).
+ if p.pos == p.syncPos && p.syncCnt < 10 {
+ p.syncCnt++
+ return
+ }
+ if p.syncPos.Before(p.pos) {
+ p.syncPos = p.pos
+ p.syncCnt = 0
+ return
+ }
+ // Reaching here indicates a parser bug, likely an
+ // incorrect token list in this function, but it only
+ // leads to skipping of possibly correct code if a
+ // previous error is present, and thus is preferred
+ // over a non-terminating parse.
+ case token.EOF:
+ return
+ }
+ p.next()
+ }
+}
+
+// safePos returns a valid file position for a given position: If pos
+// is valid to begin with, safePos returns pos. If pos is out-of-range,
+// safePos returns the EOF position.
+//
+// This is hack to work around "artificial" end positions in the AST which
+// are computed by adding 1 to (presumably valid) token positions. If the
+// token positions are invalid due to parse errors, the resulting end position
+// may be past the file's EOF position, which would lead to panics if used
+// later on.
+func (p *parser) safePos(pos token.Pos) (res token.Pos) {
+ defer func() {
+ if recover() != nil {
+ res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position
+ }
+ }()
+ _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
+ return pos
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+ c := p.openComments()
+ pos := p.pos
+ name := "_"
+ if p.tok == token.IDENT {
+ name = p.lit
+ p.next()
+ } else {
+ p.expect(token.IDENT) // use expect() error handling
+ }
+ ident := &ast.Ident{NamePos: pos, Name: name}
+ c.closeNode(p, ident)
+ return ident
+}
+
+func (p *parser) parseKeyIdent() *ast.Ident {
+ c := p.openComments()
+ pos := p.pos
+ name := p.lit
+ p.next()
+ ident := &ast.Ident{NamePos: pos, Name: name}
+ c.closeNode(p, ident)
+ return ident
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+// parseOperand returns an expression.
+// Callers must verify the result.
+func (p *parser) parseOperand() (expr ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ return p.parseIdent()
+
+ case token.LBRACE:
+ return p.parseStruct()
+
+ case token.LBRACK:
+ return p.parseList()
+
+ case token.BOTTOM:
+ c := p.openComments()
+ x := &ast.BottomLit{Bottom: p.pos}
+ p.next()
+ return c.closeExpr(p, x)
+
+ case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
+ c := p.openComments()
+ x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+ p.next()
+ return c.closeExpr(p, x)
+
+ case token.INTERPOLATION:
+ return p.parseInterpolation()
+
+ case token.LPAREN:
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ p.openList()
+ x := p.parseRHS() // types may be parenthesized: (some type)
+ p.closeList()
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{
+ Lparen: lparen,
+ X: x,
+ Rparen: rparen}
+
+ default:
+ if p.tok.IsKeyword() {
+ return p.parseKeyIdent()
+ }
+ }
+
+ // we have an error
+ c := p.openComments()
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ syncExpr(p)
+ return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+ c.pos = 1
+
+ const N = 2
+ lbrack := p.expect(token.LBRACK)
+
+ p.exprLev++
+ var index [N]ast.Expr
+ var colons [N - 1]token.Pos
+ if p.tok != token.COLON {
+ index[0] = p.parseRHS()
+ }
+ nColons := 0
+ for p.tok == token.COLON && nColons < len(colons) {
+ colons[nColons] = p.pos
+ nColons++
+ p.next()
+ if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
+ index[nColons] = p.parseRHS()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if nColons > 0 {
+ return &ast.SliceExpr{
+ X: x,
+ Lbrack: lbrack,
+ Low: index[0],
+ High: index[1],
+ Rbrack: rbrack}
+ }
+
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: index[0],
+ Rbrack: rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+
+ p.openList()
+ defer p.closeList()
+
+ lparen := p.expect(token.LPAREN)
+
+ p.exprLev++
+ var list []ast.Expr
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
+ if !p.atComma("argument list", token.RPAREN) {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expectClosing(token.RPAREN, "argument list")
+
+ return &ast.CallExpr{
+ Fun: fun,
+ Lparen: lparen,
+ Args: list,
+ Rparen: rparen}
+}
+
+// TODO: inline this function in parseFieldList once we no longer use comment
+// position information in parsing.
+func (p *parser) consumeDeclComma() {
+ if p.atComma("struct literal", token.RBRACE, token.EOF) {
+ p.next()
+ }
+}
+
+func (p *parser) parseFieldList() (list []ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "FieldList"))
+ }
+ p.openList()
+ defer p.closeList()
+
+ for p.tok != token.RBRACE && p.tok != token.EOF {
+ switch p.tok {
+ case token.ATTRIBUTE:
+ list = append(list, p.parseAttribute())
+ p.consumeDeclComma()
+
+ case token.ELLIPSIS:
+ c := p.openComments()
+ ellipsis := &ast.Ellipsis{Ellipsis: p.pos}
+ p.next()
+ c.closeNode(p, ellipsis)
+ list = append(list, ellipsis)
+ p.consumeDeclComma()
+
+ default:
+ list = append(list, p.parseField())
+ }
+
+ // TODO: handle next comma here, after disallowing non-colon separator
+		// and we have eliminated the need for comment positions.
+ }
+
+ return
+}
+
+func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "Field"))
+ }
+
+ c := p.openComments()
+
+ letPos := p.expect(token.LET)
+ if p.tok != token.IDENT {
+ c.closeNode(p, ident)
+ return nil, &ast.Ident{
+ NamePos: letPos,
+ Name: "let",
+ }
+ }
+ defer func() { c.closeNode(p, decl) }()
+
+ ident = p.parseIdent()
+ assign := p.expect(token.BIND)
+ expr := p.parseRHS()
+
+ p.consumeDeclComma()
+
+ return &ast.LetClause{
+ Let: letPos,
+ Ident: ident,
+ Equal: assign,
+ Expr: expr,
+ }, nil
+}
+
+func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "Comprehension"))
+ }
+
+ c := p.openComments()
+ defer func() { c.closeNode(p, decl) }()
+
+ tok := p.tok
+ pos := p.pos
+ clauses, fc := p.parseComprehensionClauses(true)
+ if fc != nil {
+ ident = &ast.Ident{
+ NamePos: pos,
+ Name: tok.String(),
+ }
+ fc.closeNode(p, ident)
+ return nil, ident
+ }
+
+ sc := p.openComments()
+ expr := p.parseStruct()
+ sc.closeExpr(p, expr)
+
+ if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
+ p.next()
+ }
+
+ return &ast.Comprehension{
+ Clauses: clauses,
+ Value: expr,
+ }, nil
+}
+
+func (p *parser) parseField() (decl ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "Field"))
+ }
+
+ c := p.openComments()
+ defer func() { c.closeNode(p, decl) }()
+
+ pos := p.pos
+
+ this := &ast.Field{Label: nil}
+ m := this
+
+ tok := p.tok
+
+ label, expr, decl, ok := p.parseLabel(false)
+ if decl != nil {
+ return decl
+ }
+ m.Label = label
+
+ if !ok {
+ if expr == nil {
+ expr = p.parseRHS()
+ }
+ if a, ok := expr.(*ast.Alias); ok {
+ p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
+ p.consumeDeclComma()
+ return a
+ }
+ e := &ast.EmbedDecl{Expr: expr}
+ p.consumeDeclComma()
+ return e
+ }
+
+ if p.tok == token.OPTION {
+ m.Optional = p.pos
+ p.next()
+ }
+
+ // TODO: consider disallowing comprehensions with more than one label.
+ // This can be a bit awkward in some cases, but it would naturally
+ // enforce the proper style that a comprehension be defined in the
+ // smallest possible scope.
+ // allowComprehension = false
+
+ switch p.tok {
+ case token.COLON, token.ISA:
+ case token.COMMA:
+ p.expectComma() // sync parser.
+ fallthrough
+
+ case token.RBRACE, token.EOF:
+ if a, ok := expr.(*ast.Alias); ok {
+ p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
+ return a
+ }
+ switch tok {
+ case token.IDENT, token.LBRACK, token.LPAREN,
+ token.STRING, token.INTERPOLATION,
+ token.NULL, token.TRUE, token.FALSE,
+ token.FOR, token.IF, token.LET, token.IN:
+ return &ast.EmbedDecl{Expr: expr}
+ }
+ fallthrough
+
+ default:
+ p.errorExpected(p.pos, "label or ':'")
+ return &ast.BadDecl{From: pos, To: p.pos}
+ }
+
+ m.TokenPos = p.pos
+ m.Token = p.tok
+ if p.tok == token.ISA {
+ p.assertV0(p.pos, 2, 0, "'::'")
+ }
+ if p.tok != token.COLON && p.tok != token.ISA {
+ p.errorExpected(pos, "':' or '::'")
+ }
+ p.next() // : or ::
+
+ for {
+ if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 {
+ p.errf(l.Pos(), "square bracket must have exactly one element")
+ }
+
+ tok := p.tok
+ label, expr, _, ok := p.parseLabel(true)
+ if !ok || (p.tok != token.COLON && p.tok != token.ISA && p.tok != token.OPTION) {
+ if expr == nil {
+ expr = p.parseRHS()
+ }
+ m.Value = expr
+ break
+ }
+ field := &ast.Field{Label: label}
+ m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
+ m = field
+
+ if tok != token.LSS && p.tok == token.OPTION {
+ m.Optional = p.pos
+ p.next()
+ }
+
+ m.TokenPos = p.pos
+ m.Token = p.tok
+ if p.tok == token.ISA {
+ p.assertV0(p.pos, 2, 0, "'::'")
+ }
+ if p.tok != token.COLON && p.tok != token.ISA {
+ if p.tok.IsLiteral() {
+ p.errf(p.pos, "expected ':' or '::'; found %s", p.lit)
+ } else {
+ p.errf(p.pos, "expected ':' or '::'; found %s", p.tok)
+ }
+ break
+ }
+ p.next()
+ }
+
+ if attrs := p.parseAttributes(); attrs != nil {
+ m.Attrs = attrs
+ }
+
+ p.consumeDeclComma()
+
+ return this
+}
+
+func (p *parser) parseAttributes() (attrs []*ast.Attribute) {
+ p.openList()
+ for p.tok == token.ATTRIBUTE {
+ attrs = append(attrs, p.parseAttribute())
+ }
+ p.closeList()
+ return attrs
+}
+
+func (p *parser) parseAttribute() *ast.Attribute {
+ c := p.openComments()
+ a := &ast.Attribute{At: p.pos, Text: p.lit}
+ p.next()
+ c.closeNode(p, a)
+ return a
+}
+
+func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) {
+ tok := p.tok
+ switch tok {
+
+ case token.FOR, token.IF:
+ if rhs {
+ expr = p.parseExpr()
+ break
+ }
+ comp, ident := p.parseComprehension()
+ if comp != nil {
+ return nil, nil, comp, false
+ }
+ expr = ident
+
+ case token.LET:
+ let, ident := p.parseLetDecl()
+ if let != nil {
+ return nil, nil, let, false
+ }
+ expr = ident
+
+ case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN,
+ token.NULL, token.TRUE, token.FALSE, token.IN:
+ expr = p.parseExpr()
+
+ case token.LBRACK:
+ expr = p.parseRHS()
+ switch x := expr.(type) {
+ case *ast.ListLit:
+ // Note: caller must verify this list is suitable as a label.
+ label, ok = x, true
+ }
+ }
+
+ switch x := expr.(type) {
+ case *ast.BasicLit:
+ switch x.Kind {
+ case token.STRING, token.NULL, token.TRUE, token.FALSE:
+ // Keywords that represent operands.
+
+ // Allowing keywords to be used as a labels should not interfere with
+ // generating good errors: any keyword can only appear on the RHS of a
+ // field (after a ':'), whereas labels always appear on the LHS.
+
+ label, ok = x, true
+ }
+
+ case *ast.Ident:
+ if strings.HasPrefix(x.Name, "__") {
+ p.errf(x.NamePos, "identifiers starting with '__' are reserved")
+ }
+
+ expr = p.parseAlias(x)
+ if a, ok := expr.(*ast.Alias); ok {
+ if _, ok = a.Expr.(ast.Label); !ok {
+ break
+ }
+ label = a
+ } else {
+ label = x
+ }
+ ok = true
+
+ case ast.Label:
+ label, ok = x, true
+ }
+ return label, expr, nil, ok
+}
+
+func (p *parser) parseStruct() (expr ast.Expr) {
+ lbrace := p.expect(token.LBRACE)
+
+ if p.trace {
+ defer un(trace(p, "StructLit"))
+ }
+
+ elts := p.parseStructBody()
+ rbrace := p.expectClosing(token.RBRACE, "struct literal")
+ return &ast.StructLit{
+ Lbrace: lbrace,
+ Elts: elts,
+ Rbrace: rbrace,
+ }
+}
+
+func (p *parser) parseStructBody() []ast.Decl {
+ if p.trace {
+ defer un(trace(p, "StructBody"))
+ }
+
+ p.exprLev++
+ var elts []ast.Decl
+
+ // TODO: consider "stealing" non-lead comments.
+ // for _, cg := range p.comments.groups {
+ // if cg != nil {
+ // elts = append(elts, cg)
+ // }
+ // }
+ // p.comments.groups = p.comments.groups[:0]
+
+ if p.tok != token.RBRACE {
+ elts = p.parseFieldList()
+ }
+ p.exprLev--
+
+ return elts
+}
+
+// parseComprehensionClauses parses either new-style (first==true)
+// or old-style (first==false).
+// Should we now disallow keywords as identifiers? If not, we need to
+// return a list of discovered labels as the alternative.
+func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) {
+ // TODO: reuse Template spec, which is possible if it doesn't check the
+ // first is an identifier.
+
+ for {
+ switch p.tok {
+ case token.FOR:
+ c := p.openComments()
+ forPos := p.expect(token.FOR)
+ if first {
+ switch p.tok {
+ case token.COLON, token.ISA, token.BIND, token.OPTION,
+ token.COMMA, token.EOF:
+ return nil, c
+ }
+ }
+
+ var key, value *ast.Ident
+ var colon token.Pos
+ value = p.parseIdent()
+ if p.tok == token.COMMA {
+ colon = p.expect(token.COMMA)
+ key = value
+ value = p.parseIdent()
+ }
+ c.pos = 4
+ // params := p.parseParams(nil, ARROW)
+ clauses = append(clauses, c.closeClause(p, &ast.ForClause{
+ For: forPos,
+ Key: key,
+ Colon: colon,
+ Value: value,
+ In: p.expect(token.IN),
+ Source: p.parseRHS(),
+ }))
+
+ case token.IF:
+ c := p.openComments()
+ ifPos := p.expect(token.IF)
+ if first {
+ switch p.tok {
+ case token.COLON, token.ISA, token.BIND, token.OPTION,
+ token.COMMA, token.EOF:
+ return nil, c
+ }
+ }
+
+ clauses = append(clauses, c.closeClause(p, &ast.IfClause{
+ If: ifPos,
+ Condition: p.parseRHS(),
+ }))
+
+ case token.LET:
+ c := p.openComments()
+ letPos := p.expect(token.LET)
+
+ ident := p.parseIdent()
+ assign := p.expect(token.BIND)
+ expr := p.parseRHS()
+
+ clauses = append(clauses, c.closeClause(p, &ast.LetClause{
+ Let: letPos,
+ Ident: ident,
+ Equal: assign,
+ Expr: expr,
+ }))
+
+ default:
+ return clauses, nil
+ }
+ if p.tok == token.COMMA {
+ p.next()
+ }
+
+ first = false
+ }
+}
+
+func (p *parser) parseList() (expr ast.Expr) {
+ lbrack := p.expect(token.LBRACK)
+
+ if p.trace {
+ defer un(trace(p, "ListLiteral"))
+ }
+
+ elts := p.parseListElements()
+
+ if p.tok == token.ELLIPSIS {
+ ellipsis := &ast.Ellipsis{
+ Ellipsis: p.pos,
+ }
+ elts = append(elts, ellipsis)
+ p.next()
+ if p.tok != token.COMMA && p.tok != token.RBRACK {
+ ellipsis.Type = p.parseRHS()
+ }
+ if p.atComma("list literal", token.RBRACK) {
+ p.next()
+ }
+ }
+
+ rbrack := p.expectClosing(token.RBRACK, "list literal")
+ return &ast.ListLit{
+ Lbrack: lbrack,
+ Elts: elts,
+ Rbrack: rbrack}
+}
+
+func (p *parser) parseListElements() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ListElements"))
+ }
+ p.openList()
+ defer p.closeList()
+
+ for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
+ expr, ok := p.parseListElement()
+ list = append(list, expr)
+ if !ok {
+ break
+ }
+ }
+
+ return
+}
+
+func (p *parser) parseListElement() (expr ast.Expr, ok bool) {
+ if p.trace {
+ defer un(trace(p, "ListElement"))
+ }
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+
+ switch p.tok {
+ case token.FOR, token.IF:
+ tok := p.tok
+ pos := p.pos
+ clauses, fc := p.parseComprehensionClauses(true)
+ if clauses != nil {
+ sc := p.openComments()
+ expr := p.parseStruct()
+ sc.closeExpr(p, expr)
+
+ if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF
+ p.next()
+ }
+
+ return &ast.Comprehension{
+ Clauses: clauses,
+ Value: expr,
+ }, true
+ }
+
+ expr = &ast.Ident{
+ NamePos: pos,
+ Name: tok.String(),
+ }
+ fc.closeNode(p, expr)
+
+ default:
+ expr = p.parseUnaryExpr()
+ }
+
+ expr = p.parseBinaryExprTail(token.LowestPrec+1, expr)
+ expr = p.parseAlias(expr)
+
+ // Enforce there is an explicit comma. We could also allow the
+ // omission of commas in lists, but this gives rise to some ambiguities
+ // with list comprehensions.
+ if p.tok == token.COMMA && p.lit != "," {
+ p.next()
+ // Allow missing comma for last element, though, to be compliant
+ // with JSON.
+ if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
+ return expr, false
+ }
+ p.errf(p.pos, "missing ',' before newline in list literal")
+ } else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
+ return expr, false
+ }
+ p.next()
+
+ return expr, true
+}
+
+// parseAlias turns an expression into an alias.
+func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) {
+ if p.tok != token.BIND {
+ return lhs
+ }
+ pos := p.pos
+ p.next()
+ expr = p.parseRHS()
+ if expr == nil {
+ panic("empty return")
+ }
+ switch x := lhs.(type) {
+ case *ast.Ident:
+ return &ast.Alias{Ident: x, Equal: pos, Expr: expr}
+ }
+ p.errf(p.pos, "expected identifier for alias")
+ return expr
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+ switch unparen(x).(type) {
+ case *ast.BadExpr:
+ case *ast.BottomLit:
+ case *ast.Ident:
+ case *ast.BasicLit:
+ case *ast.Interpolation:
+ case *ast.StructLit:
+ case *ast.ListLit:
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.SelectorExpr:
+ case *ast.IndexExpr:
+ case *ast.SliceExpr:
+ case *ast.CallExpr:
+ case *ast.UnaryExpr:
+ case *ast.BinaryExpr:
+ default:
+ // all other nodes are not proper expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{
+ From: x.Pos(), To: p.safePos(x.End()),
+ }
+ }
+ return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+ if p, isParen := x.(*ast.ParenExpr); isParen {
+ x = unparen(p.X)
+ }
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "PrimaryExpr"))
+ }
+
+ return p.parsePrimaryExprTail(p.parseOperand())
+}
+
+// parsePrimaryExprTail repeatedly extends operand with selector ('.'),
+// index ('['), and call ('(') suffixes until none of these follow.
+func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr {
+ x := operand
+L:
+ for {
+ switch p.tok {
+ case token.PERIOD:
+ c := p.openComments()
+ c.pos = 1
+ p.next()
+ switch p.tok {
+ case token.IDENT:
+ x = &ast.SelectorExpr{
+ X: p.checkExpr(x),
+ Sel: p.parseIdent(),
+ }
+ case token.STRING:
+ // A selector may also be a simple double-quoted string
+ // (`a."b"`), but not an empty or multiline one.
+ if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) {
+ str := &ast.BasicLit{
+ ValuePos: p.pos,
+ Kind: token.STRING,
+ Value: p.lit,
+ }
+ p.next()
+ x = &ast.SelectorExpr{
+ X: p.checkExpr(x),
+ Sel: str,
+ }
+ break
+ }
+ fallthrough
+ default:
+ // Error recovery: synthesize a "_" selector so parsing
+ // can continue with a structurally valid node.
+ pos := p.pos
+ p.errorExpected(pos, "selector")
+ p.next() // make progress
+ x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
+ }
+ c.closeNode(p, x)
+ case token.LBRACK:
+ x = p.parseIndexOrSlice(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseCallOrConversion(p.checkExpr(x))
+ default:
+ break L
+ }
+ }
+
+ return x
+}
+
+// parseUnaryExpr parses a unary expression. In CUE the comparison and
+// match operators (<, <=, >=, >, !=, =~, !~) may also be used as unary
+// constraint operators (e.g. `<10`), hence the long token list below.
+//
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "UnaryExpr"))
+ }
+
+ switch p.tok {
+ case token.ADD, token.SUB, token.NOT, token.MUL,
+ token.LSS, token.LEQ, token.GEQ, token.GTR,
+ token.NEQ, token.MAT, token.NMAT:
+ pos, op := p.pos, p.tok
+ c := p.openComments()
+ p.next()
+ return c.closeExpr(p, &ast.UnaryExpr{
+ OpPos: pos,
+ Op: op,
+ X: p.checkExpr(p.parseUnaryExpr()),
+ })
+ }
+
+ return p.parsePrimaryExpr()
+}
+
+// tokPrec returns the current token and its binary-operator precedence.
+// The word-form division operators (quo, rem, div, mod) are spelled as
+// identifiers by the scanner and are mapped to their dedicated tokens
+// here, at multiplicative precedence (7). Any other identifier has
+// precedence 0 and thus terminates a binary expression.
+func (p *parser) tokPrec() (token.Token, int) {
+ tok := p.tok
+ if tok == token.IDENT {
+ switch p.lit {
+ case "quo":
+ return token.IQUO, 7
+ case "rem":
+ return token.IREM, 7
+ case "div":
+ return token.IDIV, 7
+ case "mod":
+ return token.IMOD, 7
+ default:
+ return tok, 0
+ }
+ }
+ return tok, tok.Precedence()
+}
+
+// parseBinaryExpr parses a binary expression with operators of
+// precedence >= prec1, using precedence climbing.
+//
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "BinaryExpr"))
+ }
+ p.openList()
+ defer p.closeList()
+
+ return p.parseBinaryExprTail(prec1, p.parseUnaryExpr())
+}
+
+// parseBinaryExprTail folds subsequent operators of sufficient precedence
+// into x, recursing at prec+1 for the right operand so operators of equal
+// precedence associate to the left.
+func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr {
+ for {
+ op, prec := p.tokPrec()
+ if prec < prec1 {
+ return x
+ }
+ c := p.openComments()
+ c.pos = 1
+ pos := p.expect(p.tok)
+ x = c.closeExpr(p, &ast.BinaryExpr{
+ X: p.checkExpr(x),
+ OpPos: pos,
+ Op: op,
+ // Treat nested expressions as RHS.
+ Y: p.checkExpr(p.parseBinaryExpr(prec + 1))})
+ }
+}
+
+// parseInterpolation parses an interpolated string. The scanner delivers
+// it as alternating string fragments and '(' tokens; the resulting
+// Interpolation therefore always holds an odd number of elements:
+// fragment, expr, fragment, expr, ..., fragment.
+func (p *parser) parseInterpolation() (expr ast.Expr) {
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+
+ p.openList()
+ defer p.closeList()
+
+ cc := p.openComments()
+
+ lit := p.lit
+ pos := p.pos
+ p.next()
+ last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit}
+ exprs := []ast.Expr{last}
+
+ for p.tok == token.LPAREN {
+ c.pos = 1
+ p.expect(token.LPAREN)
+ cc.closeExpr(p, last)
+
+ exprs = append(exprs, p.parseRHS())
+
+ cc = p.openComments()
+ if p.tok != token.RPAREN {
+ p.errf(p.pos, "expected ')' for string interpolation")
+ }
+ // Ask the scanner to continue the enclosing string literal after
+ // the embedded expression's closing parenthesis.
+ lit = p.scanner.ResumeInterpolation()
+ pos = p.pos
+ p.next()
+ last = &ast.BasicLit{
+ ValuePos: pos,
+ Kind: token.STRING,
+ Value: lit,
+ }
+ exprs = append(exprs, last)
+ }
+ cc.closeExpr(p, last)
+ return &ast.Interpolation{Elts: exprs}
+}
+
+// parseExpr parses a full expression starting at the lowest binding
+// precedence.
+//
+// Callers must check the result (using checkExpr), depending on context.
+func (p *parser) parseExpr() (expr ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+
+ c := p.openComments()
+ defer func() { c.closeExpr(p, expr) }()
+
+ return p.parseBinaryExpr(token.LowestPrec + 1)
+}
+
+// parseRHS parses an expression for use on the right-hand side of a
+// field or alias, validating it with checkExpr on behalf of the caller.
+func (p *parser) parseRHS() ast.Expr {
+ x := p.checkExpr(p.parseExpr())
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// isValidImport reports whether the quoted string lit is a legal import
+// path: non-empty, all graphic non-space runes, and free of the listed
+// punctuation. A trailing ":identifier" package qualifier is ignored.
+func isValidImport(lit string) bool {
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal
+ if p := strings.LastIndexByte(s, ':'); p >= 0 {
+ s = s[:p]
+ }
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return false
+ }
+ }
+ return s != ""
+}
+
+// parseImportSpec parses a single import spec, optionally preceded by a
+// renaming identifier. The spec is recorded in p.imports as a side
+// effect. The int parameter exists only to match the iteration index
+// passed by parseImports and is unused.
+func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
+ if p.trace {
+ defer un(trace(p, "ImportSpec"))
+ }
+
+ c := p.openComments()
+
+ var ident *ast.Ident
+ if p.tok == token.IDENT {
+ ident = p.parseIdent()
+ }
+
+ pos := p.pos
+ var path string
+ if p.tok == token.STRING {
+ path = p.lit
+ if !isValidImport(path) {
+ p.errf(pos, "invalid import path: %s", path)
+ }
+ p.next()
+ p.expectComma() // call before accessing p.linecomment
+ } else {
+ p.expect(token.STRING) // use expect() error handling
+ if p.tok == token.COMMA {
+ p.expectComma() // call before accessing p.linecomment
+ }
+ }
+ // collect imports
+ spec := &ast.ImportSpec{
+ Name: ident,
+ Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
+ }
+ c.closeNode(p, spec)
+ p.imports = append(p.imports, spec)
+
+ return spec
+}
+
+// parseImports parses an import declaration: either a single spec or a
+// parenthesized list of specs. The "import" keyword itself arrives as an
+// IDENT token and is consumed via parseIdent.
+func (p *parser) parseImports() *ast.ImportDecl {
+ if p.trace {
+ defer un(trace(p, "Imports"))
+ }
+ c := p.openComments()
+
+ ident := p.parseIdent()
+ var lparen, rparen token.Pos
+ var list []*ast.ImportSpec
+ if p.tok == token.LPAREN {
+ lparen = p.pos
+ p.next()
+ p.openList()
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, p.parseImportSpec(iota))
+ }
+ p.closeList()
+ rparen = p.expect(token.RPAREN)
+ p.expectComma()
+ } else {
+ list = append(list, p.parseImportSpec(0))
+ }
+
+ d := &ast.ImportDecl{
+ Import: ident.Pos(),
+ Lparen: lparen,
+ Specs: list,
+ Rparen: rparen,
+ }
+ c.closeNode(p, d)
+ return d
+}
+
+// ----------------------------------------------------------------------------
+// Source files
+
+// parseFile parses a complete CUE file: leading attributes, an optional
+// package clause, more attributes, import declarations, and finally the
+// remaining top-level declarations. Which of these stages run is
+// controlled by the packageClauseOnlyMode and importsOnlyMode flags.
+func (p *parser) parseFile() *ast.File {
+ if p.trace {
+ defer un(trace(p, "File"))
+ }
+
+ c := p.comments
+
+ // Don't bother parsing the rest if we had errors scanning the first
+ // token. Likely not a CUE source file at all.
+ if p.errors != nil {
+ return nil
+ }
+ p.openList()
+
+ var decls []ast.Decl
+
+ // Attributes may precede the package clause.
+ for p.tok == token.ATTRIBUTE {
+ decls = append(decls, p.parseAttribute())
+ p.consumeDeclComma()
+ }
+
+ // The package clause is not a declaration: it does not appear in any
+ // scope. Note that "package" is scanned as an IDENT, not a keyword.
+ if p.tok == token.IDENT && p.lit == "package" {
+ c := p.openComments()
+
+ pos := p.pos
+ var name *ast.Ident
+ p.expect(token.IDENT)
+ name = p.parseIdent()
+ if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
+ p.errf(p.pos, "invalid package name _")
+ }
+
+ pkg := &ast.Package{
+ PackagePos: pos,
+ Name: name,
+ }
+ decls = append(decls, pkg)
+ p.expectComma()
+ c.closeNode(p, pkg)
+ }
+
+ for p.tok == token.ATTRIBUTE {
+ decls = append(decls, p.parseAttribute())
+ p.consumeDeclComma()
+ }
+
+ if p.mode&packageClauseOnlyMode == 0 {
+ // import decls
+ for p.tok == token.IDENT && p.lit == "import" {
+ decls = append(decls, p.parseImports())
+ }
+
+ if p.mode&importsOnlyMode == 0 {
+ // rest of package decls
+ // TODO: loop and allow multiple expressions.
+ decls = append(decls, p.parseFieldList()...)
+ p.expect(token.EOF)
+ }
+ }
+ p.closeList()
+
+ f := &ast.File{
+ Imports: p.imports,
+ Decls: decls,
+ }
+ c.closeNode(p, f)
+ return f
+}
diff --git a/vendor/cuelang.org/go/cue/path.go b/vendor/cuelang.org/go/cue/path.go
new file mode 100644
index 0000000000..8054107848
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/path.go
@@ -0,0 +1,520 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/astinternal"
+ "cuelang.org/go/internal/core/adt"
+ "github.com/cockroachdb/apd/v2"
+)
+
+// A Selector is a component of a path. It wraps one of the concrete
+// selector implementations below (string, definition, index, hidden,
+// wildcard, optional, or error).
+type Selector struct {
+ sel selector
+}
+
+// String reports the CUE representation of a selector.
+func (sel Selector) String() string {
+ return sel.sel.String()
+}
+
+// IsString reports whether sel is a regular label type.
+func (sel Selector) IsString() bool {
+ return sel.sel.kind() == adt.StringLabel
+}
+
+// IsDefinition reports whether sel is a non-hidden definition label type.
+// Hidden definitions (_#foo) report false.
+func (sel Selector) IsDefinition() bool {
+ return sel.sel.kind() == adt.DefinitionLabel
+}
+
+// PkgPath reports the package path associated with a hidden label or "" if
+// this is not a hidden label.
+func (sel Selector) PkgPath() string {
+ // A failed assertion leaves h zero-valued, so pkg is "" as documented.
+ h, _ := sel.sel.(scopedSelector)
+ return h.pkg
+}
+
+// Wildcard selectors. Each exported name aliases an unexported value;
+// anyDefinition is intentionally not exported yet (see the commented-out
+// line below).
+var (
+
+ // AnyDefinition can be used to ask for any definition.
+ //
+ // In paths it is used to select constraints that apply to all elements.
+ // AnyDefinition = anyDefinition
+ anyDefinition = Selector{sel: anySelector(adt.AnyDefinition)}
+
+ // AnyIndex can be used to ask for any index.
+ //
+ // In paths it is used to select constraints that apply to all elements.
+ AnyIndex = anyIndex
+ anyIndex = Selector{sel: anySelector(adt.AnyIndex)}
+
+ // AnyString can be used to ask for any regular string field.
+ //
+ // In paths it is used to select constraints that apply to all elements.
+ AnyString = anyString
+ anyString = Selector{sel: anySelector(adt.AnyString)}
+)
+
+// Optional converts sel into an optional equivalent.
+// foo -> foo?
+func (sel Selector) Optional() Selector {
+ return wrapOptional(sel)
+}
+
+// selector is the internal interface implemented by every concrete
+// Selector variant.
+type selector interface {
+ String() string
+
+ // feature converts the selector to the runtime's interned label.
+ feature(ctx adt.Runtime) adt.Feature
+ // kind reports the label category (string, int, definition, hidden...).
+ kind() adt.FeatureType
+ // optional reports whether the selector matches optional fields.
+ optional() bool
+}
+
+// A Path is series of selectors to query a CUE value.
+type Path struct {
+ path []Selector
+}
+
+// MakePath creates a Path from a sequence of selectors.
+func MakePath(selectors ...Selector) Path {
+ return Path{path: selectors}
+}
+
+// pathToStrings is a utility function for creating debugging info.
+func pathToStrings(p Path) (a []string) {
+ for _, sel := range p.Selectors() {
+ a = append(a, sel.String())
+ }
+ return a
+}
+
+// ParsePath parses a CUE expression into a Path. Any error resulting from
+// this conversion can be obtained by calling Err on the result.
+//
+// Unlike with normal CUE expressions, the first element of the path may be
+// a string literal.
+//
+// A path may not contain hidden fields. To create a path with hidden fields,
+// use MakePath and Ident.
+func ParsePath(s string) Path {
+ if s == "" {
+ return Path{}
+ }
+ expr, err := parser.ParseExpr("", s)
+ if err != nil {
+ // Errors are carried inside the path as a pathError selector.
+ return MakePath(Selector{pathError{errors.Promote(err, "invalid path")}})
+ }
+
+ p := Path{path: toSelectors(expr)}
+ for _, sel := range p.path {
+ if sel.sel.kind().IsHidden() {
+ return MakePath(Selector{pathError{errors.Newf(token.NoPos,
+ "invalid path: hidden fields not allowed in path %s", s)}})
+ }
+ }
+ return p
+}
+
+// Selectors reports the individual selectors of a path.
+func (p Path) Selectors() []Selector {
+ return p.path
+}
+
+// String reports the CUE representation of p.
+func (p Path) String() string {
+ if err := p.Err(); err != nil {
+ return "_|_"
+ }
+
+ b := &strings.Builder{}
+ for i, sel := range p.path {
+ x := sel.sel
+ // TODO: use '.' in all cases, once supported.
+ _, isAny := x.(anySelector)
+ switch {
+ case x.kind() == adt.IntLabel && !isAny:
+ // Concrete list indices render as [N] with no leading dot.
+ b.WriteByte('[')
+ b.WriteString(x.String())
+ b.WriteByte(']')
+ continue
+ case i > 0:
+ b.WriteByte('.')
+ }
+
+ b.WriteString(x.String())
+ }
+ return b.String()
+}
+
+// Optional returns the optional form of a Path. For instance,
+// foo.bar --> foo?.bar?
+//
+func (p Path) Optional() Path {
+ q := make([]Selector, 0, len(p.path))
+ for _, s := range p.path {
+ q = appendSelector(q, wrapOptional(s))
+ }
+ return Path{path: q}
+}
+
+// toSelectors flattens a parsed path expression (a chain of selector and
+// index expressions over an identifier or literal) into selectors, in
+// left-to-right order.
+func toSelectors(expr ast.Expr) []Selector {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ return []Selector{Label(x)}
+
+ case *ast.BasicLit:
+ return []Selector{basicLitSelector(x)}
+
+ case *ast.IndexExpr:
+ a := toSelectors(x.X)
+ var sel Selector
+ if b, ok := x.Index.(*ast.BasicLit); !ok {
+ // Only constant indices are allowed in a path.
+ sel = Selector{pathError{
+ errors.Newf(token.NoPos, "non-constant expression %s",
+ astinternal.DebugStr(x.Index))}}
+ } else {
+ sel = basicLitSelector(b)
+ }
+ return appendSelector(a, sel)
+
+ case *ast.SelectorExpr:
+ a := toSelectors(x.X)
+ return appendSelector(a, Label(x.Sel))
+
+ default:
+ return []Selector{{pathError{
+ errors.Newf(token.NoPos, "invalid label %s ", astinternal.DebugStr(x)),
+ }}}
+ }
+}
+
+// appendSelector is like append(a, sel), except that it collects errors
+// in a one-element slice.
+func appendSelector(a []Selector, sel Selector) []Selector {
+ err, isErr := sel.sel.(pathError)
+ if len(a) == 1 {
+ if p, ok := a[0].sel.(pathError); ok {
+ // a already carries an error; merge any new one into it.
+ if isErr {
+ p.Error = errors.Append(p.Error, err.Error)
+ }
+ return a
+ }
+ }
+ if isErr {
+ // First error encountered: the error replaces the path so far.
+ return []Selector{sel}
+ }
+ return append(a, sel)
+}
+
+// basicLitSelector converts an INT literal to an index selector and a
+// double-quoted STRING literal to a string selector. Anything else is a
+// path error.
+func basicLitSelector(b *ast.BasicLit) Selector {
+ switch b.Kind {
+ case token.INT:
+ var n literal.NumInfo
+ if err := literal.ParseNum(b.Value, &n); err != nil {
+ // NOTE(review): message says "string index" for a numeric
+ // literal; kept as is to match upstream.
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "invalid string index %s", b.Value),
+ }}
+ }
+ var d apd.Decimal
+ _ = n.Decimal(&d)
+ i, err := d.Int64()
+ if err != nil {
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "integer %s out of range", b.Value),
+ }}
+ }
+ return Index(int(i))
+
+ case token.STRING:
+ // Only regular double-quoted strings may be used as labels.
+ info, _, _, _ := literal.ParseQuotes(b.Value, b.Value)
+ if !info.IsDouble() {
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "invalid string index %s", b.Value)}}
+ }
+ s, _ := literal.Unquote(b.Value)
+ return Selector{stringSelector(s)}
+
+ default:
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "invalid literal %s", b.Value),
+ }}
+ }
+}
+
+// Label converts an AST label to a Selector.
+func Label(label ast.Label) Selector {
+ switch x := label.(type) {
+ case *ast.Ident:
+ switch s := x.Name; {
+ case strings.HasPrefix(s, "_"):
+ // Hidden labels need a package scope; they cannot be expressed
+ // through Label. Use Hid instead.
+ // TODO: extract package from a bound identifier.
+ return Selector{pathError{errors.Newf(token.NoPos,
+ "invalid path: hidden label %s not allowed", s),
+ }}
+ case strings.HasPrefix(s, "#"):
+ return Selector{definitionSelector(x.Name)}
+ default:
+ return Selector{stringSelector(x.Name)}
+ }
+
+ case *ast.BasicLit:
+ return basicLitSelector(x)
+
+ default:
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "invalid label %s ", astinternal.DebugStr(x)),
+ }}
+ }
+}
+
+// Err reports errors that occurred when generating the path.
+// Errors from all selectors are combined into one.
+func (p Path) Err() error {
+ var errs errors.Error
+ for _, x := range p.path {
+ if err, ok := x.sel.(pathError); ok {
+ errs = errors.Append(errs, err.Error)
+ }
+ }
+ return errs
+}
+
+// isHiddenOrDefinition reports whether s is spelled as a definition (#x)
+// or hidden (_x) label.
+func isHiddenOrDefinition(s string) bool {
+ return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_")
+}
+
+// Hid returns a selector for a hidden field. It panics if pkg is empty.
+// Hidden fields are scoped by package, and pkg indicates for which package
+// the hidden field must apply. For anonymous packages, it must be set to "_".
+func Hid(name, pkg string) Selector {
+ if !ast.IsValidIdent(name) {
+ panic(fmt.Sprintf("invalid identifier %s", name))
+ }
+ if !strings.HasPrefix(name, "_") {
+ panic(fmt.Sprintf("%s is not a hidden field identifier", name))
+ }
+ if pkg == "" {
+ panic(fmt.Sprintf("missing package for hidden identifier %s", name))
+ }
+ return Selector{scopedSelector{name, pkg}}
+}
+
+// scopedSelector is a hidden label scoped to a package.
+type scopedSelector struct {
+ name, pkg string
+}
+
+// String returns the CUE representation of the definition.
+func (s scopedSelector) String() string {
+ return s.name
+}
+func (scopedSelector) optional() bool { return false }
+
+// kind classifies the label purely by its name's prefix.
+func (s scopedSelector) kind() adt.FeatureType {
+ switch {
+ case strings.HasPrefix(s.name, "#"):
+ return adt.DefinitionLabel
+ case strings.HasPrefix(s.name, "_#"):
+ return adt.HiddenDefinitionLabel
+ case strings.HasPrefix(s.name, "_"):
+ return adt.HiddenLabel
+ default:
+ return adt.StringLabel
+ }
+}
+
+func (s scopedSelector) feature(r adt.Runtime) adt.Feature {
+ return adt.MakeIdentLabel(r, s.name, s.pkg)
+}
+
+// A Def marks a string as a definition label. An # will be added if a string is
+// not prefixed with a #. It will panic if s cannot be written as a valid
+// identifier.
+func Def(s string) Selector {
+ if !strings.HasPrefix(s, "#") && !strings.HasPrefix(s, "_#") {
+ s = "#" + s
+ }
+ if !ast.IsValidIdent(s) {
+ panic(fmt.Sprintf("invalid definition %s", s))
+ }
+ return Selector{definitionSelector(s)}
+}
+
+// definitionSelector is a definition label, stored with its '#' prefix.
+type definitionSelector string
+
+// String returns the CUE representation of the definition.
+func (d definitionSelector) String() string {
+ return string(d)
+}
+
+func (d definitionSelector) optional() bool { return false }
+
+func (d definitionSelector) kind() adt.FeatureType {
+ return adt.DefinitionLabel
+}
+
+func (d definitionSelector) feature(r adt.Runtime) adt.Feature {
+ return adt.MakeIdentLabel(r, string(d), "")
+}
+
+// A Str is a CUE string label. Definition selectors are defined with Def.
+func Str(s string) Selector {
+ return Selector{stringSelector(s)}
+}
+
+// stringSelector is a regular string label.
+type stringSelector string
+
+// String renders the label, quoting it whenever it would otherwise be
+// read as an identifier, definition, or hidden label.
+func (s stringSelector) String() string {
+ str := string(s)
+ if isHiddenOrDefinition(str) || !ast.IsValidIdent(str) {
+ return literal.Label.Quote(str)
+ }
+ return str
+}
+
+func (s stringSelector) optional() bool { return false }
+func (s stringSelector) kind() adt.FeatureType { return adt.StringLabel }
+
+func (s stringSelector) feature(r adt.Runtime) adt.Feature {
+ return adt.MakeStringLabel(r, string(s))
+}
+
+// An Index selects a list element by index.
+// An out-of-range index yields a Selector carrying a path error.
+func Index(x int) Selector {
+ f, err := adt.MakeLabel(nil, int64(x), adt.IntLabel)
+ if err != nil {
+ return Selector{pathError{err}}
+ }
+ return Selector{indexSelector(f)}
+}
+
+// indexSelector is an integer label, stored as its interned adt.Feature.
+type indexSelector adt.Feature
+
+func (s indexSelector) String() string {
+ return strconv.Itoa(adt.Feature(s).Index())
+}
+
+func (s indexSelector) kind() adt.FeatureType { return adt.IntLabel }
+func (s indexSelector) optional() bool { return false }
+
+func (s indexSelector) feature(r adt.Runtime) adt.Feature {
+ return adt.Feature(s)
+}
+
+// an anySelector represents a wildcard option of a particular type.
+// Wildcards are always optional: they match constraints, not fields.
+type anySelector adt.Feature
+
+func (s anySelector) String() string { return "[_]" }
+func (s anySelector) optional() bool { return true }
+func (s anySelector) kind() adt.FeatureType { return adt.Feature(s).Typ() }
+
+func (s anySelector) feature(r adt.Runtime) adt.Feature {
+ return adt.Feature(s)
+}
+
+// TODO: allow import paths to be represented?
+//
+// // ImportPath defines a lookup at the root of an instance. It must be the first
+// // element of a Path.
+// func ImportPath(s string) Selector {
+// return importSelector(s)
+// }
+// optionalSelector wraps another selector, overriding only optional()
+// and the rendered form (a trailing '?').
+type optionalSelector struct {
+ selector
+}
+
+// wrapOptional returns an optional version of sel, leaving selectors
+// that are already optional untouched.
+func wrapOptional(sel Selector) Selector {
+ if !sel.sel.optional() {
+ sel = Selector{optionalSelector{sel.sel}}
+ }
+ return sel
+}
+
+// func isOptional(sel selector) bool {
+// _, ok := sel.(optionalSelector)
+// return ok
+// }
+
+func (s optionalSelector) optional() bool { return true }
+
+func (s optionalSelector) String() string {
+ return s.selector.String() + "?"
+}
+
+// TODO: allow looking up in parent scopes?
+
+// // Parent returns a Selector for looking up in the parent of a current node.
+// // Parent selectors may only occur at the start of a Path.
+// func Parent() Selector {
+// return parentSelector{}
+// }
+
+// type parentSelector struct{}
+
+// func (p parentSelector) String() string { return "__up" }
+// func (p parentSelector) feature(r adt.Runtime) adt.Feature {
+// return adt.InvalidLabel
+// }
+
+// pathError is a selector that carries an error. It satisfies the
+// selector interface with inert implementations so that an erroneous
+// path can still be traversed, rendered, and queried for Err.
+type pathError struct {
+ errors.Error
+}
+
+func (p pathError) String() string { return "" }
+func (p pathError) optional() bool { return false }
+func (p pathError) kind() adt.FeatureType { return 0 }
+func (p pathError) feature(r adt.Runtime) adt.Feature {
+ return adt.InvalidLabel
+}
+
+// valueToSel converts a concrete CUE value to a Selector: integers
+// become index selectors and strings become string selectors. Anything
+// else (including non-integer numbers) is a path error.
+func valueToSel(v adt.Value) Selector {
+ switch x := adt.Unwrap(v).(type) {
+ case *adt.Num:
+ i, err := x.X.Int64()
+ if err != nil {
+ // NOTE(review): this wraps *pathError (pointer) whereas other
+ // sites use the value form; kept as is to match upstream.
+ return Selector{&pathError{errors.Promote(err, "invalid number")}}
+ }
+ return Index(int(i))
+ case *adt.String:
+ return Str(x.Str)
+ default:
+ return Selector{pathError{errors.Newf(token.NoPos, "dynamic selector")}}
+ }
+}
+
+// featureToSel converts an interned runtime label back to a public
+// Selector. Unknown label types yield a path error.
+func featureToSel(f adt.Feature, r adt.Runtime) Selector {
+ switch f.Typ() {
+ case adt.StringLabel:
+ return Str(f.StringValue(r))
+ case adt.IntLabel:
+ return Index(f.Index())
+ case adt.DefinitionLabel:
+ return Def(f.IdentString(r))
+ case adt.HiddenLabel, adt.HiddenDefinitionLabel:
+ ident := f.IdentString(r)
+ pkg := f.PkgID(r)
+ return Hid(ident, pkg)
+ }
+ return Selector{pathError{
+ errors.Newf(token.NoPos, "unexpected feature type %v", f.Typ()),
+ }}
+}
diff --git a/vendor/cuelang.org/go/cue/query.go b/vendor/cuelang.org/go/cue/query.go
new file mode 100644
index 0000000000..af046ee86f
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/query.go
@@ -0,0 +1,84 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "cuelang.org/go/internal/core/adt"
+)
+
+// This file contains query-related code.
+
+// getScopePrefix finds the Vertex that exists in v for the longest prefix of p.
+//
+// It is used to make the parent scopes visible when resolving expressions.
+// It stops at the first selector that does not resolve to an existing
+// value and returns the deepest value reached.
+func getScopePrefix(v Value, p Path) Value {
+ for _, sel := range p.Selectors() {
+ w := v.LookupPath(MakePath(sel))
+ if !w.Exists() {
+ break
+ }
+ v = w
+ }
+ return v
+}
+
+// LookupPath reports the value for path p relative to v.
+//
+// For each selector it first searches the existing arcs of the current
+// vertex; failing that, optional selectors fall back to materializing a
+// new vertex from matching pattern constraints. A selector that resolves
+// neither way produces an error value whose NotExists flag is set.
+func (v Value) LookupPath(p Path) Value {
+ if v.v == nil {
+ return Value{}
+ }
+ n := v.v
+ parent := v.parent_
+ ctx := v.ctx()
+
+outer:
+ for _, sel := range p.path {
+ f := sel.sel.feature(v.idx)
+ for _, a := range n.Arcs {
+ if a.Label == f {
+ parent = linkParent(parent, n, a)
+ n = a
+ continue outer
+ }
+ }
+ if sel.sel.optional() {
+ // No concrete arc: build a vertex and let the struct's
+ // pattern constraints insert any matching conjuncts.
+ x := &adt.Vertex{
+ Parent: n,
+ Label: sel.sel.feature(ctx),
+ }
+ n.MatchAndInsert(ctx, x)
+ if len(x.Conjuncts) > 0 {
+ x.Finalize(ctx)
+ parent = linkParent(parent, n, x)
+ n = x
+ continue
+ }
+ }
+
+ var x *adt.Bottom
+ if err, ok := sel.sel.(pathError); ok {
+ // The selector itself was erroneous; surface its error.
+ x = &adt.Bottom{Err: err.Error}
+ } else {
+ x = mkErr(v.idx, n, adt.EvalError, "field not found: %v", sel.sel)
+ if n.Accept(ctx, f) {
+ // The field is allowed but absent: incomplete, not invalid.
+ x.Code = adt.IncompleteError
+ }
+ x.NotExists = true
+ }
+ v := makeValue(v.idx, n, parent)
+ return newErrValue(v, x)
+ }
+ return makeValue(v.idx, n, parent)
+}
diff --git a/vendor/cuelang.org/go/cue/scanner/fuzz.go b/vendor/cuelang.org/go/cue/scanner/fuzz.go
new file mode 100644
index 0000000000..376a57e419
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/scanner/fuzz.go
@@ -0,0 +1,40 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+// +build gofuzz
+
+package scanner
+
+import (
+ "cuelang.org/go/cue/token"
+)
+
+// Fuzz is the go-fuzz entry point for the scanner. It tokenizes b to
+// EOF and returns 0 if any scan error was reported (input uninteresting)
+// and 1 otherwise (input worth prioritizing).
+func Fuzz(b []byte) int {
+ retCode := 1
+ eh := func(_ token.Pos, msg string, args []interface{}) {
+ retCode = 0
+ }
+
+ var s Scanner
+ s.Init(token.NewFile("", 1, len(b)), b, eh, ScanComments)
+
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ return retCode
+}
diff --git a/vendor/cuelang.org/go/cue/scanner/scanner.go b/vendor/cuelang.org/go/cue/scanner/scanner.go
new file mode 100644
index 0000000000..84b3643d32
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/scanner/scanner.go
@@ -0,0 +1,991 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scanner implements a scanner for CUE source text. It takes a []byte
+// as source which can then be tokenized through repeated calls to the Scan
+// method.
+package scanner // import "cuelang.org/go/cue/scanner"
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cuelang.org/go/cue/token"
+)
+
+// An ErrorHandler is a generic error handler used throughout CUE packages.
+//
+// The position points to the beginning of the offending value.
+type ErrorHandler func(pos token.Pos, msg string, args []interface{})
+
+// A Scanner holds the Scanner's internal state while processing
+// a given text. It can be allocated as part of another data
+// structure but must be initialized via Init before use.
+type Scanner struct {
+ // immutable state
+ file *token.File // source file handle
+ dir string // directory portion of file.Name()
+ src []byte // source
+ errh ErrorHandler // error reporting; or nil
+ mode Mode // scanning mode
+
+ // scanning state
+ ch rune // current character
+ offset int // character offset
+ rdOffset int // reading offset (position after current character)
+ lineOffset int // current line offset
+ linesSinceLast int
+ spacesSinceLast int
+ insertEOL bool // insert a comma before next newline
+
+ // quoteStack tracks nesting of interpolated string literals.
+ quoteStack []quoteInfo
+
+ // public state - ok to modify
+ ErrorCount int // number of errors encountered
+}
+
+// quoteInfo describes the delimiter of an (interpolated) string literal:
+// the quote character, how many of them open the literal, and how many
+// '#' signs surround it.
+type quoteInfo struct {
+ char rune
+ numChar int
+ numHash int
+}
+
+const bom = 0xFEFF // byte order mark, only permitted as very first character
+
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
+// Line starts are registered with the file as a side effect.
+func (s *Scanner) next() {
+ if s.rdOffset < len(s.src) {
+ s.offset = s.rdOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ r, w := rune(s.src[s.rdOffset]), 1
+ switch {
+ case r == 0:
+ s.errf(s.offset, "illegal character NUL")
+ case r >= utf8.RuneSelf:
+ // not ASCII
+ r, w = utf8.DecodeRune(s.src[s.rdOffset:])
+ if r == utf8.RuneError && w == 1 {
+ s.errf(s.offset, "illegal UTF-8 encoding")
+ } else if r == bom && s.offset > 0 {
+ // A BOM is only tolerated as the file's first character.
+ s.errf(s.offset, "illegal byte order mark")
+ }
+ }
+ s.rdOffset += w
+ s.ch = r
+ } else {
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ s.ch = -1 // eof
+ }
+}
+
+// A Mode value is a set of flags (or 0).
+// They control scanner behavior, in particular whether comments are
+// returned and whether commas are inserted automatically.
+type Mode uint
+
+// These constants are options to the Init function.
+const (
+ ScanComments Mode = 1 << iota // return comments as COMMENT tokens
+ dontInsertCommas // do not automatically insert commas - for testing only
+)
+
+// Init prepares the scanner s to tokenize the text src by setting the
+// scanner at the beginning of src. The scanner uses the file set file
+// for position information and it adds line information for each line.
+// It is ok to re-use the same file when re-scanning the same file as
+// line information which is already present is ignored. Init causes a
+// panic if the file size does not match the src size.
+//
+// Calls to Scan will invoke the error handler err if they encounter a
+// syntax error and err is not nil. Also, for each error encountered,
+// the Scanner field ErrorCount is incremented by one. The mode parameter
+// determines how comments are handled.
+//
+// Note that Init may call err if there is an error in the first character
+// of the file.
+func (s *Scanner) Init(file *token.File, src []byte, eh ErrorHandler, mode Mode) {
+ // Explicitly initialize all fields since a scanner may be reused.
+ if file.Size() != len(src) {
+ panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
+ }
+ s.file = file
+ s.dir, _ = filepath.Split(file.Name())
+ s.src = src
+ s.errh = eh
+ s.mode = mode
+
+ // ' ' is a neutral sentinel: the first next() call will not register
+ // a spurious line start for it.
+ s.ch = ' '
+ s.offset = 0
+ s.rdOffset = 0
+ s.lineOffset = 0
+ s.insertEOL = false
+ s.ErrorCount = 0
+
+ s.next()
+ if s.ch == bom {
+ s.next() // ignore BOM at file beginning
+ }
+}
+
+// errf reports a scan error at byte offset offs via the installed error
+// handler (if any) and increments ErrorCount regardless.
+func (s *Scanner) errf(offs int, msg string, args ...interface{}) {
+ if s.errh != nil {
+ s.errh(s.file.Pos(offs, 0), msg, args)
+ }
+ s.ErrorCount++
+}
+
+var prefix = []byte("//line ")
+
+// interpretLineComment applies a "//line filename:line" directive: when
+// text has that form, subsequent positions are reported relative to the
+// given filename and line number.
+func (s *Scanner) interpretLineComment(text []byte) {
+ if bytes.HasPrefix(text, prefix) {
+ // get filename and line number, if any
+ if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
+ if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 {
+ // valid //line filename:line comment
+ filename := string(bytes.TrimSpace(text[len(prefix):i]))
+ if filename != "" {
+ filename = filepath.Clean(filename)
+ if !filepath.IsAbs(filename) {
+ // make filename relative to current directory
+ filename = filepath.Join(s.dir, filename)
+ }
+ }
+ // update scanner position
+ s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line
+ }
+ }
+ }
+}
+
+// scanComment scans a //-style comment and returns its text. CUE has no
+// /*-style comments, so any other character after the initial '/' is
+// reported as an unterminated comment.
+func (s *Scanner) scanComment() string {
+ // initial '/' already consumed; s.ch == '/' || s.ch == '*'
+ offs := s.offset - 1 // position of initial '/'
+ hasCR := false
+
+ if s.ch == '/' {
+ //-style comment
+ s.next()
+ for s.ch != '\n' && s.ch >= 0 {
+ if s.ch == '\r' {
+ hasCR = true
+ }
+ s.next()
+ }
+ if offs == s.lineOffset {
+ // comment starts at the beginning of the current line
+ s.interpretLineComment(s.src[offs:s.offset])
+ }
+ goto exit
+ }
+
+ s.errf(offs, "comment not terminated")
+
+exit:
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ // TODO: preserve /r/n
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
+}
+
+// findLineEnd reports whether a run of comments starting at the current
+// position is followed by a newline or EOF before the next non-comment
+// token. It reads ahead but restores the scanner state before returning.
+func (s *Scanner) findLineEnd() bool {
+ // initial '/' already consumed
+
+ defer func(offs int) {
+ // reset scanner state to where it was upon calling findLineEnd
+ s.ch = '/'
+ s.offset = offs
+ s.rdOffset = offs + 1
+ s.next() // consume initial '/' again
+ }(s.offset - 1)
+
+ // read ahead until a newline, EOF, or non-comment token is found
+ for s.ch == '/' || s.ch == '*' {
+ if s.ch == '/' {
+ //-style comment always contains a newline
+ return true
+ }
+ /*-style comment: look for newline */
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ if ch == '\n' {
+ return true
+ }
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ break
+ }
+ }
+ s.skipWhitespace(0) // s.insertSemi is set
+ if s.ch < 0 || s.ch == '\n' {
+ return true
+ }
+ if s.ch != '/' {
+ // non-comment token
+ return false
+ }
+ s.next() // consume '/'
+ }
+
+ return false
+}
+
+// isLetter reports whether ch may start or continue an identifier:
+// ASCII letters, or any Unicode letter outside the ASCII range.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isDigit reports whether ch is a decimal digit (ASCII or Unicode).
+func isDigit(ch rune) bool {
+ // TODO(mpvl): Is this correct?
+ return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
+}
+
+// scanFieldIdentifier scans an identifier used in field position, which
+// may carry a leading '_' (hidden) and/or '#' (definition) marker.
+func (s *Scanner) scanFieldIdentifier() string {
+ offs := s.offset
+ if s.ch == '_' {
+ s.next()
+ }
+ if s.ch == '#' {
+ s.next()
+ // TODO: remove this block to allow #
+ if isDigit(s.ch) {
+ // Stop early: '#' directly followed by a digit is not
+ // accepted as a field identifier here.
+ return string(s.src[offs:s.offset])
+ }
+ }
+ for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func isExtendedIdent(r rune) bool {
+ return strings.IndexRune("-_#$%. ", r) >= 0
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case ch == '_':
+ return 0
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func (s *Scanner) scanMantissa(base int) {
+ var last rune
+ for digitVal(s.ch) < base {
+ if last == '_' && s.ch == '_' {
+ s.errf(s.offset, "illegal '_' in number")
+ }
+ last = s.ch
+ s.next()
+ }
+ if last == '_' {
+ s.errf(s.offset-1, "illegal '_' in number")
+ }
+}
+
+func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) {
+ // digitVal(s.ch) < 10
+ offs := s.offset
+ tok := token.INT
+
+ if seenDecimalPoint {
+ offs--
+ tok = token.FLOAT
+ s.scanMantissa(10)
+ goto exponent
+ }
+
+ if s.ch == '0' {
+ // int or float
+ offs := s.offset
+ s.next()
+ if s.ch == 'x' || s.ch == 'X' {
+ // hexadecimal int
+ s.next()
+ s.scanMantissa(16)
+ if s.offset-offs <= 2 {
+ // only scanned "0x" or "0X"
+ s.errf(offs, "illegal hexadecimal number")
+ }
+ } else if s.ch == 'b' {
+ // binary int
+ s.next()
+ s.scanMantissa(2)
+ if s.offset-offs <= 2 {
+ // only scanned "0b"
+ s.errf(offs, "illegal binary number")
+ }
+ } else if s.ch == 'o' {
+ // octal int
+ s.next()
+ s.scanMantissa(8)
+ if s.offset-offs <= 2 {
+ // only scanned "0o"
+ s.errf(offs, "illegal octal number")
+ }
+ } else {
+ // 0 or float
+ seenDigits := false
+ if s.ch >= '0' && s.ch <= '9' {
+ seenDigits = true
+ s.scanMantissa(10)
+ }
+ if s.ch == '.' || s.ch == 'e' || s.ch == 'E' {
+ goto fraction
+ }
+ if seenDigits {
+ // integer other than 0 may not start with 0
+ s.errf(offs, "illegal integer number")
+ }
+ }
+ goto exit
+ }
+
+ // decimal int or float
+ s.scanMantissa(10)
+
+ // TODO: allow 3h4s, etc.
+ // switch s.ch {
+ // case 'h', 'm', 's', "µ"[0], 'u', 'n':
+ // }
+
+fraction:
+ if s.ch == '.' {
+ if p := s.offset + 1; p < len(s.src) && s.src[p] == '.' {
+ // interpret dot as part of a range.
+ goto exit
+ }
+ tok = token.FLOAT
+ s.next()
+ s.scanMantissa(10)
+ }
+
+exponent:
+ switch s.ch {
+ case 'K', 'M', 'G', 'T', 'P':
+ tok = token.INT // TODO: Or should we allow this to be a float?
+ s.next()
+ if s.ch == 'i' {
+ s.next()
+ }
+ goto exit
+ }
+
+ if s.ch == 'e' || s.ch == 'E' {
+ tok = token.FLOAT
+ s.next()
+ if s.ch == '-' || s.ch == '+' {
+ s.next()
+ }
+ s.scanMantissa(10)
+ }
+
+exit:
+ return tok, string(s.src[offs:s.offset])
+}
+
+// scanEscape parses an escape sequence where rune is the accepted
+// escaped quote. In case of a syntax error, it stops at the offending
+// character (without consuming it) and returns false. Otherwise
+// it returns true.
+//
+// Must be compliant with https://tools.ietf.org/html/rfc4627.
+func (s *Scanner) scanEscape(quote quoteInfo) (ok, interpolation bool) {
+ for i := 0; i < quote.numHash; i++ {
+ if s.ch != '#' {
+ return true, false
+ }
+ s.next()
+ }
+
+ offs := s.offset
+
+ var n int
+ var base, max uint32
+ switch s.ch {
+ case '(':
+ return true, true
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '/', quote.char:
+ s.next()
+ return true, false
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.next()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.next()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.next()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ msg := "unknown escape sequence"
+ if s.ch < 0 {
+ msg = "escape sequence not terminated"
+ }
+ s.errf(offs, msg)
+ return false, false
+ }
+
+ var x uint32
+ for n > 0 {
+ d := uint32(digitVal(s.ch))
+ if d >= base {
+ if s.ch < 0 {
+ s.errf(s.offset, "escape sequence not terminated")
+ } else {
+ s.errf(s.offset, "illegal character %#U in escape sequence", s.ch)
+ }
+ return false, false
+ }
+ x = x*base + d
+ s.next()
+ n--
+ }
+
+ // TODO: this is valid JSON, so remove, but normalize and report an error
+	// for unmatched surrogate pairs.
+ if x > max {
+ s.errf(offs, "escape sequence is invalid Unicode code point")
+ return false, false
+ }
+
+ return true, false
+}
+
+func (s *Scanner) scanString(offs int, quote quoteInfo) (token.Token, string) {
+ // ", """, ', or ''' opening already consumed
+
+ tok := token.STRING
+
+ hasCR := false
+ extra := 0
+ for {
+ ch := s.ch
+ if (quote.numChar != 3 && ch == '\n') || ch < 0 {
+ s.errf(offs, "string literal not terminated")
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+ return tok, string(lit)
+ }
+
+ s.next()
+ ch, ok := s.consumeStringClose(ch, quote)
+ if ok {
+ break
+ }
+ if ch == '\r' && quote.numChar == 3 {
+ hasCR = true
+ }
+ if ch == '\\' {
+ if _, interpolation := s.scanEscape(quote); interpolation {
+ tok = token.INTERPOLATION
+ extra = 1
+ s.quoteStack = append(s.quoteStack, quote)
+ break
+ }
+ }
+ }
+ lit := s.src[offs : s.offset+extra]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+ return tok, string(lit)
+}
+
+func (s *Scanner) consumeQuotes(quote rune, max int) (next rune, n int) {
+ for ; n < max; n++ {
+ if s.ch != quote {
+ return s.ch, n
+ }
+ s.next()
+ }
+ return s.ch, n
+}
+
+func (s *Scanner) consumeStringClose(ch rune, quote quoteInfo) (next rune, atEnd bool) {
+ if quote.char != ch {
+ return ch, false
+ }
+ numChar := quote.numChar
+ n := numChar + quote.numHash
+ want := quote.char
+ for i := 1; i < n; i++ {
+ if i == numChar {
+ want = '#'
+ }
+ if want != s.ch {
+ return ch, false
+ }
+ ch = s.ch
+ s.next()
+ }
+ return s.ch, true
+}
+
+func (s *Scanner) checkHashCount(offs int, quote quoteInfo) {
+ for i := 0; i < quote.numHash; i++ {
+ if s.ch != '#' {
+ s.errf(offs, "string literal not terminated")
+ return
+ }
+ s.next()
+ }
+}
+
+func stripCR(b []byte) []byte {
+ c := make([]byte, len(b))
+ i := 0
+ for _, ch := range b {
+ if ch != '\r' {
+ c[i] = ch
+ i++
+ }
+ }
+ return c[:i]
+}
+
+// scanAttribute scans a full attribute of the form @foo(str). An attribute
+// is a lexical entry and as such whitespace is treated as normal characters
+// within the attribute.
+func (s *Scanner) scanAttribute() (tok token.Token, lit string) {
+ offs := s.offset - 1 // @ already consumed
+
+ s.scanIdentifier()
+
+ if _, tok, _ := s.Scan(); tok == token.LPAREN {
+ s.scanAttributeTokens(token.RPAREN)
+ } else {
+ s.errf(s.offset, "invalid attribute: expected '('")
+ }
+ return token.ATTRIBUTE, string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanAttributeTokens(close token.Token) {
+ for {
+ switch _, tok, _ := s.Scan(); tok {
+ case close:
+ return
+ case token.EOF:
+ s.errf(s.offset, "attribute missing '%s'", close)
+ return
+
+ case token.INTERPOLATION:
+ s.errf(s.offset, "interpolation not allowed in attribute")
+ s.popInterpolation()
+ s.recoverParen(1)
+ case token.LPAREN:
+ s.scanAttributeTokens(token.RPAREN)
+ case token.LBRACE:
+ s.scanAttributeTokens(token.RBRACE)
+ case token.LBRACK:
+ s.scanAttributeTokens(token.RBRACK)
+ case token.RPAREN, token.RBRACK, token.RBRACE:
+ s.errf(s.offset, "unexpected '%s'", tok)
+ }
+ }
+}
+
+// recoverParen is an approximate recovery mechanism to recover from invalid
+// attributes.
+func (s *Scanner) recoverParen(open int) {
+ for {
+ switch s.ch {
+ case '\n', -1:
+ return
+ case '(':
+ open++
+ case ')':
+ if open--; open == 0 {
+ return
+ }
+ }
+ s.next()
+ }
+}
+
+func (s *Scanner) skipWhitespace(inc int) {
+ for {
+ switch s.ch {
+ case ' ', '\t':
+ s.spacesSinceLast += inc
+ case '\n':
+ s.linesSinceLast += inc
+ if s.insertEOL {
+ return
+ }
+ case '\r':
+ default:
+ return
+ }
+ s.next()
+ }
+}
+
+// Helper functions for scanning multi-byte tokens such as >> += >>= .
+// Different routines recognize different length tok_i based on matches
+// of ch_i. If a token ends in '=', the result is tok1 or tok3
+// respectively. Otherwise, the result is tok0 if there was no other
+// matching character, or tok2 if the matching character was ch2.
+
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ return tok0
+}
+
+func (s *Scanner) popInterpolation() quoteInfo {
+ quote := s.quoteStack[len(s.quoteStack)-1]
+ s.quoteStack = s.quoteStack[:len(s.quoteStack)-1]
+ return quote
+}
+
+// ResumeInterpolation resumes scanning of a string interpolation.
+func (s *Scanner) ResumeInterpolation() string {
+ quote := s.popInterpolation()
+ _, str := s.scanString(s.offset-1, quote)
+ return str
+}
+
+// Scan scans the next token and returns the token position, the token,
+// and its literal string if applicable. The source end is indicated by
+// EOF.
+//
+// If the returned token is a literal (IDENT, INT, FLOAT,
+// IMAG, CHAR, STRING) or COMMENT, the literal string
+// has the corresponding value.
+//
+// If the returned token is a keyword, the literal string is the keyword.
+//
+// If the returned token is Comma, the corresponding
+// literal string is "," if the comma was present in the source,
+// and "\n" if the semicolon was inserted because of a newline or
+// at EOF.
+//
+// If the returned token is ILLEGAL, the literal string is the
+// offending character.
+//
+// In all other cases, Scan returns an empty literal string.
+//
+// For more tolerant parsing, Scan will return a valid token if
+// possible even if a syntax error was encountered. Thus, even
+// if the resulting token sequence contains no illegal tokens,
+// a client may not assume that no error occurred. Instead it
+// must check the scanner's ErrorCount or the number of calls
+// of the error handler, if there was one installed.
+//
+// Scan adds line information to the file added to the file
+// set with Init. Token positions are relative to that file
+// and thus relative to the file set.
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+scanAgain:
+ s.skipWhitespace(1)
+
+ var rel token.RelPos
+ switch {
+ case s.linesSinceLast > 1:
+ rel = token.NewSection
+ case s.linesSinceLast == 1:
+ rel = token.Newline
+ case s.spacesSinceLast > 0:
+ rel = token.Blank
+ default:
+ rel = token.NoSpace
+ }
+ // current token start
+ offset := s.offset
+ pos = s.file.Pos(offset, rel)
+
+ // determine token value
+ insertEOL := false
+ var quote quoteInfo
+ switch ch := s.ch; {
+ case '0' <= ch && ch <= '9':
+ insertEOL = true
+ tok, lit = s.scanNumber(false)
+ case isLetter(ch), ch == '$', ch == '#':
+ lit = s.scanFieldIdentifier()
+ if len(lit) > 1 {
+ // keywords are longer than one letter - avoid lookup otherwise
+ tok = token.Lookup(lit)
+ insertEOL = true
+ break
+ }
+ if ch != '#' || (s.ch != '\'' && s.ch != '"' && s.ch != '#') {
+ tok = token.IDENT
+ insertEOL = true
+ break
+ }
+ quote.numHash = 1
+ ch = s.ch
+ fallthrough
+ default:
+ s.next() // always make progress
+ switch ch {
+ case -1:
+ if s.insertEOL {
+ s.insertEOL = false // EOF consumed
+ return s.file.Pos(offset, token.Elided), token.COMMA, "\n"
+ }
+ tok = token.EOF
+ case '_':
+ if s.ch == '|' {
+ // Unconditionally require this to be followed by another
+ // underscore to avoid needing an extra lookahead.
+ // Note that `_|x` is always equal to _.
+ s.next()
+ if s.ch != '_' {
+ s.errf(s.file.Offset(pos), "illegal token '_|'; expected '_'")
+ insertEOL = s.insertEOL // preserve insertComma info
+ tok = token.ILLEGAL
+ lit = "_|"
+ break
+ }
+ s.next()
+ tok = token.BOTTOM
+ lit = "_|_"
+ } else {
+ tok = token.IDENT
+ lit = "_" + s.scanFieldIdentifier()
+ }
+ insertEOL = true
+
+ case '\n':
+ // we only reach here if s.insertComma was
+ // set in the first place and exited early
+ // from s.skipWhitespace()
+ s.insertEOL = false // newline consumed
+ p := s.file.Pos(offset, token.Elided)
+ s.skipWhitespace(1)
+ // Don't elide comma before a ',' or ':' to ensure JSON
+ // conformance. Note that cue fmt should immediately undo those.
+ if s.ch == ',' || s.ch == ':' {
+ return s.Scan()
+ }
+ return p, token.COMMA, "\n"
+
+ case '#':
+ for quote.numHash++; s.ch == '#'; quote.numHash++ {
+ s.next()
+ }
+ ch = s.ch
+ if ch != '\'' && ch != '"' {
+ break
+ }
+ s.next()
+ fallthrough
+ case '"', '\'':
+ insertEOL = true
+ quote.char = ch
+ quote.numChar = 1
+ offs := s.offset - 1 - quote.numHash
+ switch _, n := s.consumeQuotes(ch, 2); n {
+ case 0:
+ quote.numChar = 1
+ tok, lit = s.scanString(offs, quote)
+ case 1:
+ s.checkHashCount(offs, quote)
+ tok, lit = token.STRING, string(s.src[offs:s.offset])
+ case 2:
+ quote.numChar = 3
+ switch s.ch {
+ case '\n':
+ s.next()
+ tok, lit = s.scanString(offs, quote)
+ case '\r':
+ s.next()
+ if s.ch == '\n' {
+ s.next()
+ tok, lit = s.scanString(offs, quote)
+ break
+ }
+ fallthrough
+ default:
+ s.errf(offs, "expected newline after multiline quote %s",
+ s.src[offs:s.offset])
+ tok, lit = token.STRING, string(s.src[offs:s.offset])
+ }
+ }
+ case '@':
+ insertEOL = true
+ tok, lit = s.scanAttribute()
+ case ':':
+ if s.ch == ':' {
+ s.next()
+ tok = token.ISA
+ } else {
+ tok = token.COLON
+ }
+ case ';':
+ tok = token.SEMICOLON
+ insertEOL = true
+ case '?':
+ tok = token.OPTION
+ insertEOL = true
+ case '.':
+ if '0' <= s.ch && s.ch <= '9' {
+ insertEOL = true
+ tok, lit = s.scanNumber(true)
+ } else if s.ch == '.' {
+ s.next()
+ if s.ch == '.' {
+ s.next()
+ tok = token.ELLIPSIS
+ insertEOL = true
+ } else {
+ s.errf(s.file.Offset(pos), "illegal token '..'; expected '.'")
+ }
+ } else {
+ tok = token.PERIOD
+ }
+ case ',':
+ tok = token.COMMA
+ lit = ","
+ case '(':
+ tok = token.LPAREN
+ case ')':
+ insertEOL = true
+ tok = token.RPAREN
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ insertEOL = true
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ insertEOL = true
+ tok = token.RBRACE
+ case '+':
+ tok = token.ADD // Consider ++ for list concatenate.
+ case '-':
+ tok = token.SUB
+ case '*':
+ tok = token.MUL
+ case '/':
+ if s.ch == '/' {
+ // comment
+ if s.insertEOL && s.findLineEnd() {
+ // reset position to the beginning of the comment
+ s.ch = '/'
+ s.offset = s.file.Offset(pos)
+ s.rdOffset = s.offset + 1
+ s.insertEOL = false // newline consumed
+ return s.file.Pos(offset, token.Elided), token.COMMA, "\n"
+ }
+ comment := s.scanComment()
+ if s.mode&ScanComments == 0 {
+ // skip comment
+ s.insertEOL = false // newline consumed
+ goto scanAgain
+ }
+ tok = token.COMMENT
+ lit = comment
+ } else {
+ tok = token.QUO
+ }
+ // We no longer use %, but seems like a useful token to use for
+ // something else at some point.
+ // case '%':
+ case '<':
+ if s.ch == '-' {
+ s.next()
+ tok = token.ARROW
+ } else {
+ tok = s.switch2(token.LSS, token.LEQ)
+ }
+ case '>':
+ tok = s.switch2(token.GTR, token.GEQ)
+ case '=':
+ if s.ch == '~' {
+ s.next()
+ tok = token.MAT
+ } else {
+ tok = s.switch2(token.BIND, token.EQL)
+ }
+ case '!':
+ if s.ch == '~' {
+ s.next()
+ tok = token.NMAT
+ } else {
+ tok = s.switch2(token.NOT, token.NEQ)
+ }
+ case '&':
+ switch s.ch {
+ case '&':
+ s.next()
+ tok = token.LAND
+ default:
+ tok = token.AND
+ }
+ case '|':
+ if s.ch == '|' {
+ s.next()
+ tok = token.LOR
+ } else {
+ tok = token.OR
+ }
+ default:
+ // next reports unexpected BOMs - don't repeat
+ if ch != bom {
+ s.errf(s.file.Offset(pos), "illegal character %#U", ch)
+ }
+ insertEOL = s.insertEOL // preserve insertSemi info
+ tok = token.ILLEGAL
+ lit = string(ch)
+ }
+ }
+ if s.mode&dontInsertCommas == 0 {
+ s.insertEOL = insertEOL
+ }
+
+ s.linesSinceLast = 0
+ s.spacesSinceLast = 0
+ return
+}
diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go
new file mode 100644
index 0000000000..937108382b
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/token/position.go
@@ -0,0 +1,472 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package token
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Position struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (byte count)
+ // RelPos Pos // relative position information
+}
+
+// IsValid reports whether the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+//
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Pos is a compact encoding of a source position within a file, as well as
+// relative positioning information. It can be converted into a Position for a
+// more convenient, but much larger, representation.
+//
+type Pos struct {
+ file *File
+ offset int
+}
+
+// File returns the file that contains the position p or nil if there is no
+// such file (for instance for p == NoPos).
+//
+func (p Pos) File() *File {
+ if p.index() == 0 {
+ return nil
+ }
+ return p.file
+}
+
+func (p Pos) Line() int {
+ if p.file == nil {
+ return 0
+ }
+ return p.Position().Line
+}
+
+func (p Pos) Column() int {
+ if p.file == nil {
+ return 0
+ }
+ return p.Position().Column
+}
+
+func (p Pos) Filename() string {
+ if p.file == nil {
+ return ""
+ }
+ return p.Position().Filename
+}
+
+func (p Pos) Position() Position {
+ if p.file == nil {
+ return Position{}
+ }
+ return p.file.Position(p)
+}
+
+func (p Pos) String() string {
+ return p.Position().String()
+}
+
+// NoPos is the zero value for Pos; there is no file and line information
+// associated with it, and NoPos().IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+var NoPos = Pos{}
+
+// RelPos indicates the relative position of token to the previous token.
+type RelPos int
+
+const (
+ // NoRelPos indicates no relative position is specified.
+ NoRelPos RelPos = iota
+
+ // Elided indicates that the token for which this position is defined is
+ // not rendered at all.
+ Elided
+
+ // NoSpace indicates there is no whitespace after this token.
+ NoSpace
+
+ // Blank means there is horizontal space after this token.
+ Blank
+
+ // Newline means there is a single newline after this token.
+ Newline
+
+ // NewSection means there are two or more newlines after this token.
+ NewSection
+
+ relMask = 0xf
+ relShift = 4
+)
+
+var relNames = []string{
+ "invalid", "elided", "nospace", "blank", "newline", "section",
+}
+
+func (p RelPos) String() string { return relNames[p] }
+
+func (p RelPos) Pos() Pos {
+ return Pos{nil, int(p)}
+}
+
+// HasRelPos reports whether p has a relative position.
+func (p Pos) HasRelPos() bool {
+ return p.offset&relMask != 0
+
+}
+
+func (p Pos) Before(q Pos) bool {
+ return p.file == q.file && p.Offset() < q.Offset()
+}
+
+// Offset reports the byte offset relative to the file.
+func (p Pos) Offset() int {
+ return p.Position().Offset
+}
+
+// Add creates a new position relative to the p offset by n.
+func (p Pos) Add(n int) Pos {
+ return Pos{p.file, p.offset + toPos(index(n))}
+}
+
+// IsValid reports whether the position is valid.
+func (p Pos) IsValid() bool {
+ return p != NoPos
+}
+
+// IsNewline reports whether the relative information suggests this node should
+// be printed on a new line.
+func (p Pos) IsNewline() bool {
+ return p.RelPos() >= Newline
+}
+
+func (p Pos) WithRel(rel RelPos) Pos {
+ return Pos{p.file, p.offset&^relMask | int(rel)}
+}
+
+func (p Pos) RelPos() RelPos {
+ return RelPos(p.offset & relMask)
+}
+
+func (p Pos) index() index {
+ return index(p.offset) >> relShift
+}
+
+func toPos(x index) int {
+ return (int(x) << relShift)
+}
+
+// -----------------------------------------------------------------------------
+// File
+
+type index int
+
+// A File has a name, size, and line offset table.
+type File struct {
+ mutex sync.RWMutex
+ name string // file name as provided to AddFile
+ base index // Pos index range for this file is [base...base+size]
+ size index // file size as provided to AddFile
+
+ // lines and infos are protected by set.mutex
+ lines []index // lines contains the offset of the first character for each line (the first entry is always 0)
+ infos []lineInfo
+}
+
+// NewFile returns a new file.
+func NewFile(filename string, base, size int) *File {
+ if base < 0 {
+ base = 1
+ }
+ return &File{sync.RWMutex{}, filename, index(base), index(size), []index{0}, nil}
+}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+ return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+ return int(f.base)
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+ return int(f.size)
+}
+
+// LineCount returns the number of lines in file f.
+func (f *File) LineCount() int {
+ f.mutex.RLock()
+ n := len(f.lines)
+ f.mutex.RUnlock()
+ return n
+}
+
+// AddLine adds the line offset for a new line.
+// The line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise the line offset is ignored.
+//
+func (f *File) AddLine(offset int) {
+ x := index(offset)
+ f.mutex.Lock()
+ if i := len(f.lines); (i == 0 || f.lines[i-1] < x) && x < f.size {
+ f.lines = append(f.lines, x)
+ }
+ f.mutex.Unlock()
+}
+
+// MergeLine merges a line with the following line. It is akin to replacing
+// the newline character at the end of the line with a space (to not change the
+// remaining offsets). To obtain the line number, consult e.g. Position.Line.
+// MergeLine will panic if given an invalid line number.
+//
+func (f *File) MergeLine(line int) {
+ if line <= 0 {
+ panic("illegal line number (line numbering starts at 1)")
+ }
+ f.mutex.Lock()
+ defer f.mutex.Unlock()
+ if line >= len(f.lines) {
+ panic("illegal line number")
+ }
+	// To merge the line numbered <line> with the line numbered <line+1>,
+	// we need to remove the entry in lines corresponding to the line
+	// numbered <line+1>. The entry in lines corresponding to the line
+	// numbered <line+1> is located at index <line>, since indices in lines
+	// are 0-based and line numbers are 1-based.
+ copy(f.lines[line:], f.lines[line+1:])
+ f.lines = f.lines[:len(f.lines)-1]
+}
+
+// SetLines sets the line offsets for a file and reports whether it succeeded.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
+// Each line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise SetLines fails and returns
+// false.
+// Callers must not mutate the provided slice after SetLines returns.
+//
+func (f *File) SetLines(lines []int) bool {
+ // verify validity of lines table
+ size := f.size
+ for i, offset := range lines {
+ if i > 0 && offset <= lines[i-1] || size <= index(offset) {
+ return false
+ }
+ }
+
+ // set lines table
+ f.mutex.Lock()
+ f.lines = f.lines[:0]
+ for _, l := range lines {
+ f.lines = append(f.lines, index(l))
+ }
+ f.mutex.Unlock()
+ return true
+}
+
+// SetLinesForContent sets the line offsets for the given file content.
+// It ignores position-altering //line comments.
+func (f *File) SetLinesForContent(content []byte) {
+ var lines []index
+ line := index(0)
+ for offset, b := range content {
+ if line >= 0 {
+ lines = append(lines, line)
+ }
+ line = -1
+ if b == '\n' {
+ line = index(offset) + 1
+ }
+ }
+
+ // set lines table
+ f.mutex.Lock()
+ f.lines = lines
+ f.mutex.Unlock()
+}
+
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
+// AddLineInfo adds alternative file and line number information for
+// a given file offset. The offset must be larger than the offset for
+// the previously added alternative line info and smaller than the
+// file size; otherwise the information is ignored.
+//
+// AddLineInfo is typically used to register alternative position
+// information for //line filename:line comments in source files.
+//
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+ x := index(offset)
+ f.mutex.Lock()
+ if i := len(f.infos); i == 0 || index(f.infos[i-1].Offset) < x && x < f.size {
+ f.infos = append(f.infos, lineInfo{offset, filename, line})
+ }
+ f.mutex.Unlock()
+}
+
+// Pos returns the Pos value for the given file offset;
+// the offset must be <= f.Size().
+// f.Pos(f.Offset(p)) == p.
+//
+func (f *File) Pos(offset int, rel RelPos) Pos {
+ if index(offset) > f.size {
+ panic("illegal file offset")
+ }
+ return Pos{f, toPos(f.base+index(offset)) + int(rel)}
+}
+
+// Offset returns the offset for the given file position p;
+// p must be a valid Pos value in that file.
+// f.Offset(f.Pos(offset)) == offset.
+//
+func (f *File) Offset(p Pos) int {
+ x := p.index()
+ if x < f.base || x > f.base+index(f.size) {
+ panic("illegal Pos value")
+ }
+ return int(x - f.base)
+}
+
+// Line returns the line number for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Line(p Pos) int {
+ return f.Position(p).Line
+}
+
+func searchLineInfos(a []lineInfo, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
+}
+
+// unpack returns the filename and line and column number for a file offset.
+// If adjusted is set, unpack will return the filename and line information
+// possibly adjusted by //line comments; otherwise those comments are ignored.
+//
+func (f *File) unpack(offset index, adjusted bool) (filename string, line, column int) {
+ filename = f.name
+ if i := searchInts(f.lines, offset); i >= 0 {
+ line, column = int(i+1), int(offset-f.lines[i]+1)
+ }
+ if adjusted && len(f.infos) > 0 {
+ // almost no files have extra line infos
+ if i := searchLineInfos(f.infos, int(offset)); i >= 0 {
+ alt := &f.infos[i]
+ filename = alt.Filename
+ if i := searchInts(f.lines, index(alt.Offset)); i >= 0 {
+ line += alt.Line - i - 1
+ }
+ }
+ }
+ return
+}
+
+func (f *File) position(p Pos, adjusted bool) (pos Position) {
+ offset := p.index() - f.base
+ pos.Offset = int(offset)
+ pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
+ return
+}
+
+// PositionFor returns the Position value for the given file position p.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in f or NoPos.
+//
+func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
+ x := p.index()
+ if p != NoPos {
+ if x < f.base || x > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p, adjusted)
+ }
+ return
+}
+
+// Position returns the Position value for the given file position p.
+// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true).
+//
+func (f *File) Position(p Pos) (pos Position) {
+ return f.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []index, x index) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ // TODO(gri): Remove this when compilers have caught up.
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
+}
diff --git a/vendor/cuelang.org/go/cue/token/token.go b/vendor/cuelang.org/go/cue/token/token.go
new file mode 100644
index 0000000000..5e15443449
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/token/token.go
@@ -0,0 +1,266 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package token defines constants representing the lexical tokens of the CUE
+// configuration language and basic operations on tokens (printing, predicates).
+package token // import "cuelang.org/go/cue/token"
+
+import "strconv"
+
+// Token is the set of lexical tokens of the CUE configuration language.
+type Token int
+
+// The list of tokens.
+const (
+ // Special tokens
+ ILLEGAL Token = iota
+ EOF
+ COMMENT
+ ATTRIBUTE // @foo(bar,baz=4)
+
+ literalBeg
+ // Identifiers and basic type literals
+ // (these tokens stand for classes of literals)
+ IDENT // main, _tmp
+ INT // 12_345Mi, 0700, 0xdeadbeef, 1.2M
+ FLOAT // 123.45,
+ // DURATION // 3m4s TODO
+ STRING // "abc"
+ INTERPOLATION // a part of a template string, e.g. `"age: \(`
+ BOTTOM // _|_
+
+ literalEnd
+
+ operatorBeg
+ // Operators and delimiters
+ ADD // +
+ SUB // -
+ MUL // *
+ POW // ^
+ QUO // /
+
+ IQUO // quo
+ IREM // rem
+ IDIV // div
+ IMOD // mod
+
+ AND // &
+ OR // |
+
+ LAND // &&
+ LOR // ||
+
+ BIND // =
+ EQL // ==
+ LSS // <
+ GTR // >
+ NOT // !
+ ARROW // <-
+
+ NEQ // !=
+ LEQ // <=
+ GEQ // >=
+
+ MAT // =~
+ NMAT // !~
+
+ LPAREN // (
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ ELLIPSIS // ...
+
+ RPAREN // )
+ RBRACK // ]
+ RBRACE // }
+ SEMICOLON // ;
+ COLON // :
+ ISA // ::
+ OPTION // ?
+ operatorEnd
+
+ keywordBeg
+
+ IF
+ FOR
+ IN
+ LET
+
+ TRUE
+ FALSE
+ NULL
+
+ keywordEnd
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ INT: "INT",
+ FLOAT: "FLOAT",
+ STRING: "STRING",
+ INTERPOLATION: "INTERPOLATION",
+ ATTRIBUTE: "ATTRIBUTE",
+
+ ADD: "+",
+ SUB: "-",
+ MUL: "*",
+ POW: "^",
+ QUO: "/",
+
+ IQUO: "quo",
+ IREM: "rem",
+ IDIV: "div",
+ IMOD: "mod",
+
+ AND: "&",
+ OR: "|",
+
+ LAND: "&&",
+ LOR: "||",
+
+ BIND: "=",
+ EQL: "==",
+ LSS: "<",
+ GTR: ">",
+ NOT: "!",
+ ARROW: "<-",
+
+ NEQ: "!=",
+ LEQ: "<=",
+ GEQ: ">=",
+
+ MAT: "=~",
+ NMAT: "!~",
+
+ LPAREN: "(",
+ LBRACK: "[",
+ LBRACE: "{",
+ COMMA: ",",
+ PERIOD: ".",
+ ELLIPSIS: "...",
+
+ RPAREN: ")",
+ RBRACK: "]",
+ RBRACE: "}",
+ SEMICOLON: ";",
+ COLON: ":",
+ ISA: "::",
+ OPTION: "?",
+
+ BOTTOM: "_|_",
+
+ FALSE: "false",
+ TRUE: "true",
+ NULL: "null",
+
+ FOR: "for",
+ IF: "if",
+ IN: "in",
+ LET: "let",
+}
+
+// String returns the string corresponding to the token tok.
+// For operators, delimiters, and keywords the string is the actual
+// token character sequence (e.g., for the token ADD, the string is
+// "+"). For all other tokens the string corresponds to the token
+// constant name (e.g. for the token IDENT, the string is "IDENT").
+func (tok Token) String() string {
+ s := ""
+ if 0 <= tok && tok < Token(len(tokens)) {
+ s = tokens[tok]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(tok)) + ")"
+ }
+ return s
+}
+
+// A set of constants for precedence-based expression parsing.
+// Non-operators have lowest precedence, followed by operators
+// starting with precedence 1 up to unary operators. The highest
+// precedence serves as "catch-all" precedence for selector,
+// indexing, and other operator and delimiter tokens.
+const (
+ LowestPrec = lowestPrec
+ UnaryPrec = unaryPrec
+ HighestPrec = highestPrec
+)
+
+const (
+ lowestPrec = 0 // non-operators
+ unaryPrec = 8
+ highestPrec = 9
+)
+
+// Precedence returns the operator precedence of the binary
+// operator op. If op is not a binary operator, the result
+// is LowestPrecedence.
+//
+func (tok Token) Precedence() int {
+ switch tok {
+ case OR:
+ return 1
+ case AND:
+ return 2
+ case LOR:
+ return 3
+ case LAND:
+ return 4
+ case EQL, NEQ, LSS, LEQ, GTR, GEQ, MAT, NMAT:
+ return 5
+ case ADD, SUB:
+ return 6
+ case MUL, QUO, IDIV, IMOD, IQUO, IREM:
+ return 7
+ }
+ return lowestPrec
+}
+
+var keywords map[string]Token
+
+func init() {
+ keywords = make(map[string]Token)
+ for i := keywordBeg + 1; i < keywordEnd; i++ {
+ keywords[tokens[i]] = i
+ }
+}
+
+// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
+//
+func Lookup(ident string) Token {
+ if tok, isKeyword := keywords[ident]; isKeyword {
+ return tok
+ }
+ return IDENT
+}
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+func (tok Token) IsLiteral() bool { return literalBeg < tok && tok < literalEnd }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (tok Token) IsOperator() bool { return operatorBeg < tok && tok < operatorEnd }
+
+// IsKeyword returns true for tokens corresponding to keywords;
+// it returns false otherwise.
+func (tok Token) IsKeyword() bool { return keywordBeg < tok && tok < keywordEnd }
diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go
new file mode 100644
index 0000000000..11528bef19
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/types.go
@@ -0,0 +1,2536 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strings"
+
+ "github.com/cockroachdb/apd/v2"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/compile"
+ "cuelang.org/go/internal/core/convert"
+ "cuelang.org/go/internal/core/eval"
+ "cuelang.org/go/internal/core/export"
+ "cuelang.org/go/internal/core/runtime"
+ "cuelang.org/go/internal/core/subsume"
+ "cuelang.org/go/internal/core/validate"
+ "cuelang.org/go/internal/types"
+)
+
+// Kind determines the underlying type of a Value.
+type Kind = adt.Kind
+
+const (
+ // BottomKind represents the bottom value.
+ BottomKind Kind = adt.BottomKind
+
+ // NullKind indicates a null value.
+ NullKind Kind = adt.NullKind
+
+ // BoolKind indicates a boolean value.
+ BoolKind Kind = adt.BoolKind
+
+ // IntKind represents an integral number.
+ IntKind Kind = adt.IntKind
+
+ // FloatKind represents a decimal float point number that cannot be
+ // converted to an integer. The underlying number may still be integral,
+ // but resulting from an operation that enforces the float type.
+ FloatKind Kind = adt.FloatKind
+
+ // StringKind indicates any kind of string.
+ StringKind Kind = adt.StringKind
+
+ // BytesKind is a blob of data.
+ BytesKind Kind = adt.BytesKind
+
+ // StructKind is a key-value map.
+ StructKind Kind = adt.StructKind
+
+ // ListKind indicates a list of values.
+ ListKind Kind = adt.ListKind
+
+ // _numberKind is used as an implementation detail inside
+ // Kind.String to indicate NumberKind.
+
+ // NumberKind represents any kind of number.
+ NumberKind Kind = IntKind | FloatKind
+
+ // TopKind represents the top value.
+ TopKind Kind = adt.TopKind
+)
+
+// A structValue represents a JSON object.
+//
+// TODO: remove
+type structValue struct {
+ ctx *adt.OpContext
+ v Value
+ obj *adt.Vertex
+ features []adt.Feature
+}
+
+type hiddenStructValue = structValue
+
+// Len reports the number of fields in this struct.
+func (o *hiddenStructValue) Len() int {
+ if o.obj == nil {
+ return 0
+ }
+ return len(o.features)
+}
+
+// At reports the key and value of the ith field, i < o.Len().
+func (o *hiddenStructValue) At(i int) (key string, v Value) {
+ f := o.features[i]
+ return o.v.idx.LabelStr(f), newChildValue(o, i)
+}
+
+func (o *hiddenStructValue) at(i int) (v *adt.Vertex, isOpt bool) {
+ f := o.features[i]
+ arc := o.obj.Lookup(f)
+ if arc == nil {
+ arc = &adt.Vertex{
+ Parent: o.v.v,
+ Label: f,
+ }
+ o.obj.MatchAndInsert(o.ctx, arc)
+ arc.Finalize(o.ctx)
+ isOpt = true
+ }
+ return arc, isOpt
+}
+
+// Lookup reports the field for the given key. The returned Value is invalid
+// if it does not exist.
+func (o *hiddenStructValue) Lookup(key string) Value {
+ f := o.v.idx.StrLabel(key)
+ i := 0
+ len := o.Len()
+ for ; i < len; i++ {
+ if o.features[i] == f {
+ break
+ }
+ }
+ if i == len {
+ x := mkErr(o.v.idx, o.obj, 0, "field not found: %v", key)
+ x.NotExists = true
+ // TODO: more specifically we should test whether the values that
+ // are addressable from the root of the configuration can support the
+ // looked up value. This will avoid false positives such as when
+ // an open literal struct is passed to a builtin.
+ if o.obj.Accept(o.ctx, f) {
+ x.Code = adt.IncompleteError
+ }
+ return newErrValue(o.v, x)
+ }
+ return newChildValue(o, i)
+}
+
+// MarshalJSON returns a valid JSON encoding or reports an error if any of the
+// fields is invalid.
+func (o *structValue) marshalJSON() (b []byte, err errors.Error) {
+ b = append(b, '{')
+ n := o.Len()
+ for i := 0; i < n; i++ {
+ k, v := o.At(i)
+ s, err := json.Marshal(k)
+ if err != nil {
+ return nil, unwrapJSONError(err)
+ }
+ b = append(b, s...)
+ b = append(b, ':')
+ bb, err := json.Marshal(v)
+ if err != nil {
+ return nil, unwrapJSONError(err)
+ }
+ b = append(b, bb...)
+ if i < n-1 {
+ b = append(b, ',')
+ }
+ }
+ b = append(b, '}')
+ return b, nil
+}
+
+var _ errors.Error = &marshalError{}
+
+type marshalError struct {
+ err errors.Error
+ b *adt.Bottom
+}
+
+func toMarshalErr(v Value, b *adt.Bottom) error {
+ return &marshalError{v.toErr(b), b}
+}
+
+func marshalErrf(v Value, src adt.Node, code adt.ErrorCode, msg string, args ...interface{}) error {
+ arguments := append([]interface{}{code, msg}, args...)
+ b := mkErr(v.idx, src, arguments...)
+ return toMarshalErr(v, b)
+}
+
+func (e *marshalError) Error() string {
+ return fmt.Sprintf("cue: marshal error: %v", e.err)
+}
+
+func (e *marshalError) Bottom() *adt.Bottom { return e.b }
+func (e *marshalError) Path() []string { return e.err.Path() }
+func (e *marshalError) Msg() (string, []interface{}) { return e.err.Msg() }
+func (e *marshalError) Position() token.Pos { return e.err.Position() }
+func (e *marshalError) InputPositions() []token.Pos {
+ return e.err.InputPositions()
+}
+
+func unwrapJSONError(err error) errors.Error {
+ switch x := err.(type) {
+ case *json.MarshalerError:
+ return unwrapJSONError(x.Err)
+ case *marshalError:
+ return x
+ case errors.Error:
+ return &marshalError{x, nil}
+ default:
+ return &marshalError{errors.Wrapf(err, token.NoPos, "json error"), nil}
+ }
+}
+
+// An Iterator iterates over values.
+//
+type Iterator struct {
+ val Value
+ idx *runtime.Runtime
+ ctx *adt.OpContext
+ arcs []field
+ p int
+ cur Value
+ f adt.Feature
+ isOpt bool
+}
+
+type hiddenIterator = Iterator
+
+type field struct {
+ arc *adt.Vertex
+ isOptional bool
+}
+
+// Next advances the iterator to the next value and reports whether there was
+// any. It must be called before the first call to Value or Key.
+func (i *Iterator) Next() bool {
+ if i.p >= len(i.arcs) {
+ i.cur = Value{}
+ return false
+ }
+ f := i.arcs[i.p]
+ f.arc.Finalize(i.ctx)
+ p := linkParent(i.val.parent_, i.val.v, f.arc)
+ i.cur = makeValue(i.val.idx, f.arc, p)
+ i.f = f.arc.Label
+ i.isOpt = f.isOptional
+ i.p++
+ return true
+}
+
+// Value returns the current value in the list. It will panic if Next advanced
+// past the last entry.
+func (i *Iterator) Value() Value {
+ return i.cur
+}
+
+// Selector reports the field label of this iteration.
+func (i *Iterator) Selector() Selector {
+ return featureToSel(i.f, i.idx)
+}
+
+// Label reports the label of the value if i iterates over struct fields and ""
+// otherwise.
+//
+//
+// Slated to be deprecated: use i.Selector().String(). Note that this will give
+// more accurate string representations.
+func (i *hiddenIterator) Label() string {
+ if i.f == 0 {
+ return ""
+ }
+ return i.idx.LabelStr(i.f)
+}
+
+// IsHidden reports if a field is hidden from the data model.
+//
+// Deprecated: use i.Selector().PkgPath() != ""
+func (i *hiddenIterator) IsHidden() bool {
+ return i.f.IsHidden()
+}
+
+// IsOptional reports if a field is optional.
+func (i *Iterator) IsOptional() bool {
+ return i.isOpt
+}
+
+// IsDefinition reports if a field is a definition.
+//
+// Deprecated: use i.Selector().IsDefinition()
+func (i *hiddenIterator) IsDefinition() bool {
+ return i.f.IsDef()
+}
+
+// marshalJSON iterates over the list and generates JSON output. HasNext
+// will return false after this operation.
+func marshalList(l *Iterator) (b []byte, err errors.Error) {
+ b = append(b, '[')
+ if l.Next() {
+ for i := 0; ; i++ {
+ x, err := json.Marshal(l.Value())
+ if err != nil {
+ return nil, unwrapJSONError(err)
+ }
+ b = append(b, x...)
+ if !l.Next() {
+ break
+ }
+ b = append(b, ',')
+ }
+ }
+ b = append(b, ']')
+ return b, nil
+}
+
+// getNum resolves v to its default, verifies that its kind is compatible
+// with k, and returns the underlying *adt.Num. On a kind mismatch it
+// returns a non-nil error and a nil number.
+func (v Value) getNum(k adt.Kind) (*adt.Num, errors.Error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ if err := v.checkKind(ctx, k); err != nil {
+ return nil, v.toErr(err)
+ }
+ n, _ := v.eval(ctx).(*adt.Num)
+ return n, nil
+}
+
+// MantExp breaks x into its mantissa and exponent components and returns the
+// exponent. If a non-nil mant argument is provided its value is set to the
+// mantissa of x. The components satisfy x == mant × 10**exp. It returns an
+// error if v is not a number.
+//
+// The components are not normalized. For instance, 2.00 is represented mant ==
+// 200 and exp == -2. Calling MantExp with a nil argument is an efficient way to
+// get the exponent of the receiver.
+func (v Value) MantExp(mant *big.Int) (exp int, err error) {
+ n, err := v.getNum(adt.NumKind)
+ if err != nil {
+ return 0, err
+ }
+ if n.X.Form != 0 {
+ return 0, ErrInfinite
+ }
+ if mant != nil {
+ mant.Set(&n.X.Coeff)
+ if n.X.Negative {
+ mant.Neg(mant)
+ }
+ }
+ return int(n.X.Exponent), nil
+}
+
+// Decimal is for internal use only. The Decimal type that is returned is
+// subject to change.
+func (v hiddenValue) Decimal() (d *internal.Decimal, err error) {
+ n, err := v.getNum(adt.NumKind)
+ if err != nil {
+ return nil, err
+ }
+ return &n.X, nil
+}
+
+// AppendInt appends the string representation of x in the given base to buf and
+// returns the extended buffer, or an error if the underlying number was not
+// an integer.
+func (v Value) AppendInt(buf []byte, base int) ([]byte, error) {
+ i, err := v.Int(nil)
+ if err != nil {
+ return nil, err
+ }
+ return i.Append(buf, base), nil
+}
+
+// AppendFloat appends to buf the string form of the floating-point number x.
+// It returns an error if v is not a number.
+func (v Value) AppendFloat(buf []byte, fmt byte, prec int) ([]byte, error) {
+ n, err := v.getNum(adt.NumKind)
+ if err != nil {
+ return nil, err
+ }
+ ctx := apd.BaseContext
+ nd := int(apd.NumDigits(&n.X.Coeff)) + int(n.X.Exponent)
+ if n.X.Form == apd.Infinite {
+ if n.X.Negative {
+ buf = append(buf, '-')
+ }
+ return append(buf, string('∞')...), nil
+ }
+ if fmt == 'f' && nd > 0 {
+ ctx.Precision = uint32(nd + prec)
+ } else {
+ ctx.Precision = uint32(prec)
+ }
+ var d apd.Decimal
+ ctx.Round(&d, &n.X)
+ return d.Append(buf, fmt), nil
+}
+
+var (
+ // ErrBelow indicates that a value was rounded down in a conversion.
+ ErrBelow = errors.New("value was rounded down")
+
+ // ErrAbove indicates that a value was rounded up in a conversion.
+ ErrAbove = errors.New("value was rounded up")
+
+ // ErrInfinite indicates that a value is infinite.
+ ErrInfinite = errors.New("infinite")
+)
+
+// Int converts the underlying integral number to an big.Int. It reports an
+// error if the underlying value is not an integer type. If a non-nil *Int
+// argument z is provided, Int stores the result in z instead of allocating a
+// new Int.
+func (v Value) Int(z *big.Int) (*big.Int, error) {
+ n, err := v.getNum(adt.IntKind)
+ if err != nil {
+ return nil, err
+ }
+ if z == nil {
+ z = &big.Int{}
+ }
+ if n.X.Exponent != 0 {
+ panic("cue: exponent should always be nil for integer types")
+ }
+ z.Set(&n.X.Coeff)
+ if n.X.Negative {
+ z.Neg(z)
+ }
+ return z, nil
+}
+
+// Int64 converts the underlying integral number to int64. It reports an
+// error if the underlying value is not an integer type or cannot be represented
+// as an int64. The result is (math.MinInt64, ErrAbove) for x < math.MinInt64,
+// and (math.MaxInt64, ErrBelow) for x > math.MaxInt64.
+func (v Value) Int64() (int64, error) {
+ n, err := v.getNum(adt.IntKind)
+ if err != nil {
+ return 0, err
+ }
+ if !n.X.Coeff.IsInt64() {
+ if n.X.Negative {
+ return math.MinInt64, ErrAbove
+ }
+ return math.MaxInt64, ErrBelow
+ }
+ i := n.X.Coeff.Int64()
+ if n.X.Negative {
+ i = -i
+ }
+ return i, nil
+}
+
+// Uint64 converts the underlying integral number to uint64. It reports an
+// error if the underlying value is not an integer type or cannot be represented
+// as a uint64. The result is (0, ErrAbove) for x < 0, and
+// (math.MaxUint64, ErrBelow) for x > math.MaxUint64.
+func (v Value) Uint64() (uint64, error) {
+ n, err := v.getNum(adt.IntKind)
+ if err != nil {
+ return 0, err
+ }
+ if n.X.Negative {
+ return 0, ErrAbove
+ }
+ if !n.X.Coeff.IsUint64() {
+ return math.MaxUint64, ErrBelow
+ }
+ i := n.X.Coeff.Uint64()
+ return i, nil
+}
+
+// trimZeros trims trailing 0's for better JSON representations.
+func trimZeros(s string) string {
+ n1 := len(s)
+ s2 := strings.TrimRight(s, "0")
+ n2 := len(s2)
+ if p := strings.IndexByte(s2, '.'); p != -1 {
+ if p == n2-1 {
+ // All fractional digits were zeros; keep exactly one of them.
+ return s[:len(s2)+1]
+ }
+ return s2
+ }
+ // Integer with no decimal point: only switch to exponent notation when
+ // it is actually shorter, i.e. more than four trailing zeros.
+ if n1-n2 <= 4 {
+ return s
+ }
+ return fmt.Sprint(s2, "e+", n1-n2)
+}
+
+var (
+ smallestPosFloat64 *apd.Decimal
+ smallestNegFloat64 *apd.Decimal
+ maxPosFloat64 *apd.Decimal
+ maxNegFloat64 *apd.Decimal
+)
+
+func init() {
+ const (
+ // math.SmallestNonzeroFloat64: 1 / 2**(1023 - 1 + 52)
+ smallest = "4.940656458412465441765687928682213723651e-324"
+ // math.MaxFloat64: 2**1023 * (2**53 - 1) / 2**52
+ max = "1.797693134862315708145274237317043567981e+308"
+ )
+ ctx := apd.BaseContext
+ ctx.Precision = 40
+
+ var err error
+ smallestPosFloat64, _, err = ctx.NewFromString(smallest)
+ if err != nil {
+ panic(err)
+ }
+ smallestNegFloat64, _, err = ctx.NewFromString("-" + smallest)
+ if err != nil {
+ panic(err)
+ }
+ maxPosFloat64, _, err = ctx.NewFromString(max)
+ if err != nil {
+ panic(err)
+ }
+ maxNegFloat64, _, err = ctx.NewFromString("-" + max)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// Float64 returns the float64 value nearest to x. It reports an error if v is
+// not a number. If x is too small to be represented by a float64 (|x| <
+// math.SmallestNonzeroFloat64), the result is (0, ErrBelow) or (-0, ErrAbove),
+// respectively, depending on the sign of x. If x is too large to be represented
+// by a float64 (|x| > math.MaxFloat64), the result is (+Inf, ErrAbove) or
+// (-Inf, ErrBelow), depending on the sign of x.
+func (v Value) Float64() (float64, error) {
+ n, err := v.getNum(adt.NumKind)
+ if err != nil {
+ return 0, err
+ }
+ if n.X.Negative {
+ if n.X.Cmp(smallestNegFloat64) == 1 {
+ return -0, ErrAbove
+ }
+ if n.X.Cmp(maxNegFloat64) == -1 {
+ return math.Inf(-1), ErrBelow
+ }
+ } else {
+ if n.X.Cmp(smallestPosFloat64) == -1 {
+ return 0, ErrBelow
+ }
+ if n.X.Cmp(maxPosFloat64) == 1 {
+ return math.Inf(1), ErrAbove
+ }
+ }
+ f, _ := n.X.Float64()
+ return f, nil
+}
+
+// Value holds any value, which may be a Boolean, Error, List, Null, Number,
+// Struct, or String.
+type Value struct {
+ idx *runtime.Runtime
+ v *adt.Vertex
+ // Parent keeps track of the parent if the value corresponding to v.Parent
+ // differs, recursively.
+ parent_ *parent
+}
+
+// parent is a distinct type from Value to ensure more type safety: Value
+// is typically used by value, so taking a pointer to it has a high risk
+// of clobbering the contents.
+type parent struct {
+ v *adt.Vertex
+ p *parent
+}
+
+func (v Value) parent() Value {
+ switch {
+ case v.v == nil:
+ return Value{}
+ case v.parent_ != nil:
+ return Value{v.idx, v.parent_.v, v.parent_.p}
+ default:
+ return Value{v.idx, v.v.Parent, nil}
+ }
+}
+
+type valueScope Value
+
+func (v valueScope) Vertex() *adt.Vertex { return v.v }
+func (v valueScope) Parent() compile.Scope {
+ p := Value(v).parent()
+ if p.v == nil {
+ return nil
+ }
+ return valueScope(p)
+}
+
+type hiddenValue = Value
+
+// Core is for internal use only.
+func (v hiddenValue) Core(x *types.Value) {
+ x.V = v.v
+ x.R = v.idx
+}
+
+func newErrValue(v Value, b *adt.Bottom) Value {
+ node := &adt.Vertex{BaseValue: b}
+ if v.v != nil {
+ node.Label = v.v.Label
+ node.Parent = v.v.Parent
+ }
+ node.UpdateStatus(adt.Finalized)
+ node.AddConjunct(adt.MakeRootConjunct(nil, b))
+ return makeChildValue(v.parent(), node)
+}
+
+func newVertexRoot(idx *runtime.Runtime, ctx *adt.OpContext, x *adt.Vertex) Value {
+ if ctx != nil {
+ // This is indicative of a zero Value. In some cases this is called
+ // with an error value.
+ x.Finalize(ctx)
+ } else {
+ x.UpdateStatus(adt.Finalized)
+ }
+ return makeValue(idx, x, nil)
+}
+
+func newValueRoot(idx *runtime.Runtime, ctx *adt.OpContext, x adt.Expr) Value {
+ if n, ok := x.(*adt.Vertex); ok {
+ return newVertexRoot(idx, ctx, n)
+ }
+ node := &adt.Vertex{}
+ node.AddConjunct(adt.MakeRootConjunct(nil, x))
+ return newVertexRoot(idx, ctx, node)
+}
+
+func newChildValue(o *structValue, i int) Value {
+ arc, _ := o.at(i)
+ return makeValue(o.v.idx, arc, linkParent(o.v.parent_, o.v.v, arc))
+}
+
+// Dereference reports the value v refers to if v is a reference or v itself
+// otherwise.
+func Dereference(v Value) Value {
+ n := v.v
+ if n == nil || len(n.Conjuncts) != 1 {
+ return v
+ }
+
+ c := n.Conjuncts[0]
+ r, _ := c.Expr().(adt.Resolver)
+ if r == nil {
+ return v
+ }
+
+ ctx := v.ctx()
+ n, b := ctx.Resolve(c.Env, r)
+ if b != nil {
+ return newErrValue(v, b)
+ }
+ n.Finalize(ctx)
+ // NOTE: due to structure sharing, the path of the referred node may end
+ // up different from the one explicitly pointed to. The value will be the
+ // same, but the scope may differ.
+ // TODO(structureshare): see if we can construct the original path. This
+ // only has to be done if structures are being shared.
+ return makeValue(v.idx, n, nil)
+}
+
+// makeValue wraps vertex v in a Value with the given runtime and parent
+// link. It panics if v has not been processed yet (zero status or nil
+// BaseValue), since a Value must never expose an unevaluated vertex.
+func makeValue(idx *runtime.Runtime, v *adt.Vertex, p *parent) Value {
+ if v.Status() == 0 || v.BaseValue == nil {
+ panic(fmt.Sprintf("not properly initialized (state: %v, value: %T)",
+ v.Status(), v.BaseValue))
+ }
+ return Value{idx, v, p}
+}
+
+// makeChildValue makes a new value, of which p is the parent, and links the
+// parent pointer to p if necessary.
+func makeChildValue(p Value, arc *adt.Vertex) Value {
+ return makeValue(p.idx, arc, linkParent(p.parent_, p.v, arc))
+}
+
+// linkParent creates the parent struct for an arc, if necessary.
+//
+// The parent struct is necessary if the parent struct also has a parent struct,
+// or if arc is (structurally) shared and does not have node as a parent.
+func linkParent(p *parent, node, arc *adt.Vertex) *parent {
+ if p == nil && node == arc.Parent {
+ return nil
+ }
+ return &parent{node, p}
+}
+
+func remakeValue(base Value, env *adt.Environment, v adt.Expr) Value {
+ // TODO: right now this is necessary because disjunctions do not have
+ // populated conjuncts.
+ if v, ok := v.(*adt.Vertex); ok && v.Status() >= adt.Partial {
+ return Value{base.idx, v, nil}
+ }
+ n := &adt.Vertex{Label: base.v.Label}
+ n.AddConjunct(adt.MakeRootConjunct(env, v))
+ n = manifest(base.ctx(), n)
+ n.Parent = base.v.Parent
+ return makeChildValue(base.parent(), n)
+}
+
+func remakeFinal(base Value, env *adt.Environment, v adt.Value) Value {
+ n := &adt.Vertex{Parent: base.v.Parent, Label: base.v.Label, BaseValue: v}
+ n.UpdateStatus(adt.Finalized)
+ return makeChildValue(base.parent(), n)
+}
+
+func (v Value) ctx() *adt.OpContext {
+ return newContext(v.idx)
+}
+
+// Eval resolves the references of a value and returns the result.
+// This method is not necessary to obtain concrete values.
+func (v Value) Eval() Value {
+ if v.v == nil {
+ return v
+ }
+ x := v.v
+ // x = eval.FinalizeValue(v.idx.Runtime, v.v)
+ // x.Finalize(v.ctx())
+ x = x.ToDataSingle()
+ return makeValue(v.idx, x, v.parent_)
+ // return remakeValue(v, nil, ctx.value(x))
+}
+
+// Default reports the default value and whether it existed. It returns the
+// normal value if there is no default.
+func (v Value) Default() (Value, bool) {
+ if v.v == nil {
+ return v, false
+ }
+
+ d := v.v.Default()
+ if d == v.v {
+ return v, false
+ }
+ return makeValue(v.idx, d, v.parent_), true
+
+ // d, ok := v.v.Value.(*adt.Disjunction)
+ // if !ok {
+ // return v, false
+ // }
+
+ // var w *adt.Vertex
+
+ // switch d.NumDefaults {
+ // case 0:
+ // return v, false
+
+ // case 1:
+ // w = d.Values[0]
+
+ // default:
+ // x := *v.v
+ // x.Value = &adt.Disjunction{
+ // Src: d.Src,
+ // Values: d.Values[:d.NumDefaults],
+ // NumDefaults: 0,
+ // }
+ // w = &x
+ // }
+
+ // w.Conjuncts = nil
+ // for _, c := range v.v.Conjuncts {
+ // // TODO: preserve field information.
+ // expr, _ := stripNonDefaults(c.Expr())
+ // w.AddConjunct(adt.MakeConjunct(c.Env, expr))
+ // }
+
+ // return makeValue(v.idx, w), true
+
+ // if !stripped {
+ // return v, false
+ // }
+
+ // n := *v.v
+ // n.Conjuncts = conjuncts
+ // return Value{v.idx, &n}, true
+
+ // isDefault := false
+ // for _, c := range v.v.Conjuncts {
+ // if hasDisjunction(c.Expr()) {
+ // isDefault = true
+ // break
+ // }
+ // }
+
+ // if !isDefault {
+ // return v, false
+ // }
+
+ // TODO: record expanded disjunctions in output.
+ // - Rename Disjunction to DisjunctionExpr
+ // - Introduce Disjuncts with Values.
+ // - In Expr introduce Star
+ // - Don't pick default by default?
+
+ // Evaluate the value.
+ // x := eval.FinalizeValue(v.idx.Runtime, v.v)
+ // if b, _ := x.Value.(*adt.Bottom); b != nil { // && b.IsIncomplete() {
+ // return v, false
+ // }
+ // // Finalize and return here.
+ // return Value{v.idx, x}, isDefault
+}
+
+// TODO: this should go: record preexpanded disjunctions in Vertex.
+func hasDisjunction(expr adt.Expr) bool {
+ switch x := expr.(type) {
+ case *adt.DisjunctionExpr:
+ return true
+ case *adt.Conjunction:
+ for _, v := range x.Values {
+ if hasDisjunction(v) {
+ return true
+ }
+ }
+ case *adt.BinaryExpr:
+ switch x.Op {
+ case adt.OrOp:
+ return true
+ case adt.AndOp:
+ return hasDisjunction(x.X) || hasDisjunction(x.Y)
+ }
+ }
+ return false
+}
+
+// TODO: this should go: record preexpanded disjunctions in Vertex.
+func stripNonDefaults(expr adt.Expr) (r adt.Expr, stripped bool) {
+ switch x := expr.(type) {
+ case *adt.DisjunctionExpr:
+ if !x.HasDefaults {
+ return x, false
+ }
+ d := *x
+ d.Values = []adt.Disjunct{}
+ for _, v := range x.Values {
+ if v.Default {
+ d.Values = append(d.Values, v)
+ }
+ }
+ if len(d.Values) == 1 {
+ return d.Values[0].Val, true
+ }
+ return &d, true
+
+ case *adt.BinaryExpr:
+ if x.Op != adt.AndOp {
+ return x, false
+ }
+ a, sa := stripNonDefaults(x.X)
+ b, sb := stripNonDefaults(x.Y)
+ if sa || sb {
+ bin := *x
+ bin.X = a
+ bin.Y = b
+ return &bin, true
+ }
+ return x, false
+
+ default:
+ return x, false
+ }
+}
+
+// Label reports the label used to obtain this value from the enclosing struct.
+//
+// TODO: get rid of this somehow. Probably by including a FieldInfo struct
+// or the like.
+func (v hiddenValue) Label() (string, bool) {
+ if v.v == nil || v.v.Label == 0 {
+ return "", false
+ }
+ return v.idx.LabelStr(v.v.Label), true
+}
+
+// Kind returns the kind of value. It returns BottomKind for atomic values that
+// are not concrete. For instance, it will return BottomKind for the bounds
+// >=0.
+func (v Value) Kind() Kind {
+ if v.v == nil {
+ return BottomKind
+ }
+ c := v.v.BaseValue
+ if !v.v.IsConcrete() {
+ return BottomKind
+ }
+ // TODO: perhaps we should not consider open lists as "incomplete".
+ if v.IncompleteKind() == adt.ListKind && !v.v.IsClosedList() {
+ return BottomKind
+ }
+ return c.Kind()
+}
+
+// IncompleteKind returns a mask of all kinds that this value may be.
+func (v Value) IncompleteKind() Kind {
+ if v.v == nil {
+ return BottomKind
+ }
+ return v.v.Kind()
+}
+
+// MarshalJSON marshalls this value into valid JSON.
+func (v Value) MarshalJSON() (b []byte, err error) {
+ b, err = v.marshalJSON()
+ if err != nil {
+ return nil, unwrapJSONError(err)
+ }
+ return b, nil
+}
+
+func (v Value) marshalJSON() (b []byte, err error) {
+ v, _ = v.Default()
+ if v.v == nil {
+ return json.Marshal(nil)
+ }
+ ctx := newContext(v.idx)
+ x := v.eval(ctx)
+
+ if _, ok := x.(adt.Resolver); ok {
+ return nil, marshalErrf(v, x, adt.IncompleteError, "value %q contains unresolved references", str(ctx, x))
+ }
+ if !adt.IsConcrete(x) {
+ return nil, marshalErrf(v, x, adt.IncompleteError, "cannot convert incomplete value %q to JSON", str(ctx, x))
+ }
+
+ // TODO: implement marshalers in value.
+ switch k := x.Kind(); k {
+ case adt.NullKind:
+ return json.Marshal(nil)
+ case adt.BoolKind:
+ return json.Marshal(x.(*adt.Bool).B)
+ case adt.IntKind, adt.FloatKind, adt.NumKind:
+ b, err := x.(*adt.Num).X.MarshalText()
+ b = bytes.TrimLeft(b, "+")
+ return b, err
+ case adt.StringKind:
+ return json.Marshal(x.(*adt.String).Str)
+ case adt.BytesKind:
+ return json.Marshal(x.(*adt.Bytes).B)
+ case adt.ListKind:
+ i, _ := v.List()
+ return marshalList(&i)
+ case adt.StructKind:
+ obj, err := v.structValData(ctx)
+ if err != nil {
+ return nil, toMarshalErr(v, err)
+ }
+ return obj.marshalJSON()
+ case adt.BottomKind:
+ return nil, toMarshalErr(v, x.(*adt.Bottom))
+ default:
+ return nil, marshalErrf(v, x, 0, "cannot convert value %q of type %T to JSON", str(ctx, x), x)
+ }
+}
+
+// Syntax converts the possibly partially evaluated value into syntax. This
+// can be used to print the value with package format.
+func (v Value) Syntax(opts ...Option) ast.Node {
+ // TODO: the default should ideally be simplified representation that
+ // exactly represents the value. The latter can currently only be
+ // ensured with Raw().
+ if v.v == nil {
+ return nil
+ }
+ var o options = getOptions(opts)
+ // var inst *Instance
+
+ p := export.Profile{
+ Simplify: !o.raw,
+ TakeDefaults: o.final,
+ ShowOptional: !o.omitOptional && !o.concrete,
+ ShowDefinitions: !o.omitDefinitions && !o.concrete,
+ ShowHidden: !o.omitHidden && !o.concrete,
+ ShowAttributes: !o.omitAttrs,
+ ShowDocs: o.docs,
+ ShowErrors: o.showErrors,
+ }
+
+ pkgID := v.instance().ID()
+
+ bad := func(name string, err error) ast.Node {
+ const format = `"%s: internal error
+Error: %s
+
+Profile:
+%#v
+
+Value:
+%v
+
+You could file a bug with the above information at:
+ https://cuelang.org/issues/new?assignees=&labels=NeedsInvestigation&template=bug_report.md&title=.
+`
+ cg := &ast.CommentGroup{Doc: true}
+ msg := fmt.Sprintf(format, name, err, p, v)
+ for _, line := range strings.Split(msg, "\n") {
+ cg.List = append(cg.List, &ast.Comment{Text: "// " + line})
+ }
+ x := &ast.BadExpr{}
+ ast.AddComment(x, cg)
+ return x
+ }
+
+ // var expr ast.Expr
+ var err error
+ var f *ast.File
+ if o.concrete || o.final || o.resolveReferences {
+ // inst = v.instance()
+ var expr ast.Expr
+ expr, err = p.Value(v.idx, pkgID, v.v)
+ if err != nil {
+ return bad(`"cuelang.org/go/internal/core/export".Value`, err)
+ }
+
+ // This introduces gratuitous unshadowing!
+ f, err = astutil.ToFile(expr)
+ if err != nil {
+ return bad(`"cuelang.org/go/ast/astutil".ToFile`, err)
+ }
+ // return expr
+ } else {
+ f, err = p.Def(v.idx, pkgID, v.v)
+ if err != nil {
+ return bad(`"cuelang.org/go/internal/core/export".Def`, err)
+ }
+ }
+
+outer:
+ for _, d := range f.Decls {
+ switch d.(type) {
+ case *ast.Package, *ast.ImportDecl:
+ return f
+ case *ast.CommentGroup, *ast.Attribute:
+ default:
+ break outer
+ }
+ }
+
+ if len(f.Decls) == 1 {
+ if e, ok := f.Decls[0].(*ast.EmbedDecl); ok {
+ return e.Expr
+ }
+ }
+ return &ast.StructLit{
+ Elts: f.Decls,
+ }
+}
+
+// Doc returns all documentation comments associated with the field from which
+// the current value originates.
+func (v Value) Doc() []*ast.CommentGroup {
+ if v.v == nil {
+ return nil
+ }
+ return export.ExtractDoc(v.v)
+}
+
+// Split returns a list of values from which v originated such that
+// the unification of all these values equals v and for all returned values.
+// It will also split unchecked unifications (embeddings), so unifying the
+// split values may fail if actually unified.
+// Source returns a non-nil value.
+//
+// Deprecated: use Expr.
+func (v hiddenValue) Split() []Value {
+ if v.v == nil {
+ return nil
+ }
+ a := []Value{}
+ for _, x := range v.v.Conjuncts {
+ a = append(a, remakeValue(v, x.Env, x.Expr()))
+ }
+ return a
+}
+
+// Source returns the original node for this value. The return value may not
+// be a syntax.Expr. For instance, a struct kind may be represented by a
+// struct literal, a field comprehension, or a file. It returns nil for
+// computed nodes. Use Split to get all source values that apply to a field.
+func (v Value) Source() ast.Node {
+ if v.v == nil {
+ return nil
+ }
+ if len(v.v.Conjuncts) == 1 {
+ return v.v.Conjuncts[0].Source()
+ }
+ return v.v.Value().Source()
+}
+
+// Err returns the error represented by v, or nil if v is not an error.
+func (v Value) Err() error {
+ if err := v.checkKind(v.ctx(), adt.BottomKind); err != nil {
+ return v.toErr(err)
+ }
+ return nil
+}
+
+// Pos returns position information.
+//
+// Use v.Expr to get positions for all conjuncts and disjuncts.
+func (v Value) Pos() token.Pos {
+ if v.v == nil {
+ return token.NoPos
+ }
+
+ if src := v.Source(); src != nil {
+ if pos := src.Pos(); pos != token.NoPos {
+ return pos
+ }
+ }
+ // Pick the most-concrete field.
+ var p token.Pos
+ for _, c := range v.v.Conjuncts {
+ x := c.Elem()
+ pp := pos(x)
+ if pp == token.NoPos {
+ continue
+ }
+ p = pp
+ // Prefer struct conjuncts with actual fields.
+ if s, ok := x.(*adt.StructLit); ok && len(s.Fields) > 0 {
+ break
+ }
+ }
+ return p
+}
+
+// TODO: IsFinal: this value can never be changed.
+
+// IsClosed reports whether a list or struct is closed. It reports false
+// when the value is not a list or struct.
+//
+// Deprecated: use Allows(AnyString) and Allows(AnyIndex) or Kind/IncompleteKind.
+func (v hiddenValue) IsClosed() bool {
+ if v.v == nil {
+ return false
+ }
+ switch v.Kind() {
+ case ListKind:
+ return v.v.IsClosedList()
+ case StructKind:
+ return !v.Allows(AnyString)
+ }
+ return false
+}
+
+// Allows reports whether a field with the given selector could be added to v.
+//
+// Allows does not take into account validators like list.MaxItems(4). This may
+// change in the future.
+func (v Value) Allows(sel Selector) bool {
+ c := v.ctx()
+ f := sel.sel.feature(c)
+ return v.v.Accept(c, f)
+}
+
+// IsConcrete reports whether the current value is a concrete scalar value
+// (not relying on default values), a terminal error, a list, or a struct.
+// It does not verify that values of lists or structs are concrete themselves.
+// To check whether there is a concrete default, use v.Default().IsConcrete().
+func (v Value) IsConcrete() bool {
+ if v.v == nil {
+ return false // any is neither concrete, not a list or struct.
+ }
+ if b, ok := v.v.BaseValue.(*adt.Bottom); ok {
+ return !b.IsIncomplete()
+ }
+ if !adt.IsConcrete(v.v) {
+ return false
+ }
+ if v.IncompleteKind() == adt.ListKind && !v.v.IsClosedList() {
+ return false
+ }
+ return true
+}
+
+// // Deprecated: IsIncomplete
+// //
+// // It indicates that the value cannot be fully evaluated due to
+// // insufficient information.
+// func (v Value) IsIncomplete() bool {
+// panic("deprecated")
+// }
+
+// Exists reports whether this value existed in the configuration.
+func (v Value) Exists() bool {
+ if v.v == nil {
+ return false
+ }
+ if err, ok := v.v.BaseValue.(*adt.Bottom); ok {
+ return !err.NotExists
+ }
+ return true
+}
+
+func (v Value) checkKind(ctx *adt.OpContext, want adt.Kind) *adt.Bottom {
+ if v.v == nil {
+ return errNotExists
+ }
+ // TODO: use checkKind
+ x := v.eval(ctx)
+ if b, ok := x.(*adt.Bottom); ok {
+ return b
+ }
+ k := x.Kind()
+ if want != adt.BottomKind {
+ if k&want == adt.BottomKind {
+ return mkErr(v.idx, x, "cannot use value %v (type %s) as %s",
+ ctx.Str(x), k, want)
+ }
+ if !adt.IsConcrete(x) {
+ return mkErr(v.idx, x, adt.IncompleteError, "non-concrete value %v", k)
+ }
+ }
+ return nil
+}
+
+func makeInt(v Value, x int64) Value {
+ n := &adt.Num{K: adt.IntKind}
+ n.X.SetInt64(int64(x))
+ return remakeFinal(v, nil, n)
+}
+
+// Len returns the number of items of the underlying value.
+// For lists it reports a constraint on the length: a concrete number for
+// closed lists and a lower bound (int & >=n) for open ones. For bytes it
+// reports the number of bytes and for strings the number of runes. Any
+// other kind results in an error value.
+func (v Value) Len() Value {
+ if v.v != nil {
+ switch x := v.eval(v.ctx()).(type) {
+ case *adt.Vertex:
+ if x.IsList() {
+ n := &adt.Num{K: adt.IntKind}
+ n.X.SetInt64(int64(len(x.Elems())))
+ if x.IsClosedList() {
+ return remakeFinal(v, nil, n)
+ }
+ // Note: this HAS to be a Conjunction value and cannot be
+ // an adt.BinaryExpr, as the expressions would be considered
+ // to be self-contained and unresolvable when evaluated
+ // (can never become concrete).
+ c := &adt.Conjunction{Values: []adt.Value{
+ &adt.BasicType{K: adt.IntKind},
+ &adt.BoundValue{Op: adt.GreaterEqualOp, Value: n},
+ }}
+ return remakeFinal(v, nil, c)
+
+ }
+ case *adt.Bytes:
+ return makeInt(v, int64(len(x.B)))
+ case *adt.String:
+ return makeInt(v, int64(len([]rune(x.Str))))
+ }
+ }
+ const msg = "len not supported for type %v"
+ return remakeValue(v, nil, mkErr(v.idx, v.v, msg, v.Kind()))
+
+}
+
+// Elem returns the value of undefined element types of lists and structs.
+//
+// Deprecated: use LookupPath in combination with "AnyString" or "AnyIndex".
+func (v hiddenValue) Elem() (Value, bool) {
+ // Lists use the any-index pattern; everything else the any-string one.
+ sel := AnyString
+ if v.v.IsList() {
+ sel = AnyIndex
+ }
+ x := v.LookupPath(MakePath(sel))
+ return x, x.Exists()
+}
+
+// List creates an iterator over the values of a list or reports an error if
+// v is not a list.
+func (v Value) List() (Iterator, error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ if err := v.checkKind(ctx, adt.ListKind); err != nil {
+ return Iterator{idx: v.idx, ctx: ctx}, v.toErr(err)
+ }
+ // Only integer-labeled arcs are list elements; skip anything else.
+ arcs := []field{}
+ for _, a := range v.v.Elems() {
+ if a.Label.IsInt() {
+ arcs = append(arcs, field{arc: a})
+ }
+ }
+ return Iterator{idx: v.idx, ctx: ctx, val: v, arcs: arcs}, nil
+}
+
+// Null reports an error if v is not null.
+func (v Value) Null() error {
+ v, _ = v.Default()
+ if err := v.checkKind(v.ctx(), adt.NullKind); err != nil {
+ return v.toErr(err)
+ }
+ return nil
+}
+
+// // IsNull reports whether v is null.
+// func (v Value) IsNull() bool {
+// return v.Null() == nil
+// }
+
+// Bool returns the bool value of v or false and an error if v is not a boolean.
+func (v Value) Bool() (bool, error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ if err := v.checkKind(ctx, adt.BoolKind); err != nil {
+ return false, v.toErr(err)
+ }
+ return v.eval(ctx).(*adt.Bool).B, nil
+}
+
+// String returns the string value if v is a string or an error otherwise.
+func (v Value) String() (string, error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ if err := v.checkKind(ctx, adt.StringKind); err != nil {
+ return "", v.toErr(err)
+ }
+ return v.eval(ctx).(*adt.String).Str, nil
+}
+
+// Bytes returns a byte slice if v represents a list of bytes or an error
+// otherwise. For strings it returns the UTF-8 bytes. The returned slice is
+// a copy and safe to modify.
+func (v Value) Bytes() ([]byte, error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ switch x := v.eval(ctx).(type) {
+ case *adt.Bytes:
+ return append([]byte(nil), x.B...), nil
+ case *adt.String:
+ return []byte(x.Str), nil
+ }
+ // Reuse checkKind purely to produce the appropriate error.
+ return nil, v.toErr(v.checkKind(ctx, adt.BytesKind|adt.StringKind))
+}
+
+// Reader returns a new Reader if v is a string or bytes type and an error
+// otherwise.
+func (v hiddenValue) Reader() (io.Reader, error) {
+ v, _ = v.Default()
+ ctx := v.ctx()
+ switch x := v.eval(ctx).(type) {
+ case *adt.Bytes:
+ return bytes.NewReader(x.B), nil
+ case *adt.String:
+ return strings.NewReader(x.Str), nil
+ }
+ return nil, v.toErr(v.checkKind(ctx, adt.StringKind|adt.BytesKind))
+}
+
+// TODO: distinguish between optional, hidden, etc. Probably the best approach
+// is to mark options in context and have a single function for creating
+// a structVal.
+
+// structValData returns a structValue for the data (regular) fields only,
+// omitting hidden, definition, and optional fields, or an error if v is not
+// a struct.
+func (v Value) structValData(ctx *adt.OpContext) (structValue, *adt.Bottom) {
+ return v.structValOpts(ctx, options{
+ omitHidden: true,
+ omitDefinitions: true,
+ omitOptional: true,
+ })
+}
+
+// structValFull returns a structValue including all field classes, allowing
+// scalar values as well.
+func (v Value) structValFull(ctx *adt.OpContext) (structValue, *adt.Bottom) {
+ return v.structValOpts(ctx, options{allowScalar: true})
+}
+
+// structValOpts returns a structValue for v, filtered according to the given
+// options, or an error if v is not a struct (unless scalars are allowed).
+func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err *adt.Bottom) {
+ v, _ = v.Default()
+
+ obj := v.v
+
+ switch b, ok := v.v.BaseValue.(*adt.Bottom); {
+ // Incomplete errors are tolerated unless concrete/final output is asked.
+ case ok && b.IsIncomplete() && !o.concrete && !o.final:
+
+ // TODO:
+ // case o.allowScalar, !o.omitHidden, !o.omitDefinitions:
+ // Allow scalar values if hidden or definition fields are requested?
+ case o.allowScalar:
+ default:
+ obj, err = v.getStruct()
+ if err != nil {
+ return structValue{}, err
+ }
+ }
+
+ features := export.VertexFeatures(ctx, obj)
+
+ // Filter features in place according to the options.
+ k := 0
+ for _, f := range features {
+ if f.IsDef() && (o.omitDefinitions || o.concrete) {
+ continue
+ }
+ if f.IsHidden() && o.omitHidden {
+ continue
+ }
+ if arc := obj.Lookup(f); arc == nil {
+ if o.omitOptional {
+ continue
+ }
+ // ensure it really exists.
+ v := adt.Vertex{
+ Parent: obj,
+ Label: f,
+ }
+ obj.MatchAndInsert(ctx, &v)
+ if len(v.Conjuncts) == 0 {
+ continue
+ }
+ }
+ features[k] = f
+ k++
+ }
+ features = features[:k]
+ return structValue{ctx, v, obj, features}, nil
+}
+
+// Struct returns the underlying struct of a value or an error if the value
+// is not a struct.
+func (v hiddenValue) Struct() (*Struct, error) {
+ // TODO: deprecate
+ ctx := v.ctx()
+ obj, err := v.structValOpts(ctx, options{})
+ if err != nil {
+ return nil, v.toErr(err)
+ }
+ return &Struct{obj}, nil
+}
+
+// getStruct returns v's vertex if it is a struct. Errors confined to child
+// fields (ChildError) are tolerated; only top-level kind errors are reported.
+func (v Value) getStruct() (*adt.Vertex, *adt.Bottom) {
+ ctx := v.ctx()
+ if err := v.checkKind(ctx, adt.StructKind); err != nil {
+ if !err.ChildError {
+ return nil, err
+ }
+ }
+ return v.v, nil
+}
+
+// Struct represents a CUE struct value.
+type Struct struct {
+ structValue
+}
+
+// hiddenStruct aliases Struct for methods kept only for backward
+// compatibility.
+type hiddenStruct = Struct
+
+// FieldInfo contains information about a struct field.
+type FieldInfo struct {
+ Selector string
+ Name string // Deprecated: use Selector
+ Pos int
+ Value Value
+
+ IsDefinition bool
+ IsOptional bool
+ IsHidden bool
+}
+
+// Len reports the number of fields in s.
+func (s *hiddenStruct) Len() int {
+ return s.structValue.Len()
+}
+
+// Field reports information about the ith field, i < s.Len().
+func (s *hiddenStruct) Field(i int) FieldInfo {
+ a, opt := s.at(i)
+ ctx := s.v.ctx()
+
+ v := makeChildValue(s.v, a)
+ name := s.v.idx.LabelStr(a.Label)
+ str := a.Label.SelectorString(ctx)
+ return FieldInfo{str, name, i, v, a.Label.IsDef(), opt, a.Label.IsHidden()}
+}
+
+// FieldByName looks up a field for the given name. If isIdent is true, it will
+// look up a definition or hidden field (starting with `_` or `_#`). Otherwise
+// it interprets name as an arbitrary string for a regular field.
+func (s *hiddenStruct) FieldByName(name string, isIdent bool) (FieldInfo, error) {
+ f := s.v.idx.Label(name, isIdent)
+ for i, a := range s.features {
+ if a == f {
+ return s.Field(i), nil
+ }
+ }
+ return FieldInfo{}, errNotFound
+}
+
+// Fields creates an iterator over the Struct's fields.
+func (s *hiddenStruct) Fields(opts ...Option) *Iterator {
+ iter, _ := s.v.Fields(opts...)
+ return iter
+}
+
+// Fields creates an iterator over v's fields if v is a struct or an error
+// otherwise. By default hidden, definition, and optional fields are omitted;
+// use Options to include them.
+func (v Value) Fields(opts ...Option) (*Iterator, error) {
+ o := options{omitDefinitions: true, omitHidden: true, omitOptional: true}
+ o.updateOptions(opts)
+ ctx := v.ctx()
+ obj, err := v.structValOpts(ctx, o)
+ if err != nil {
+ return &Iterator{idx: v.idx, ctx: ctx}, v.toErr(err)
+ }
+
+ arcs := []field{}
+ for i := range obj.features {
+ arc, isOpt := obj.at(i)
+ arcs = append(arcs, field{arc: arc, isOptional: isOpt})
+ }
+ return &Iterator{idx: v.idx, ctx: ctx, val: v, arcs: arcs}, nil
+}
+
+// Lookup reports the value at a path starting from v. The empty path returns v
+// itself.
+//
+// The Exists() method can be used to verify if the returned value existed.
+// Lookup cannot be used to look up hidden or optional fields or definitions.
+//
+// Deprecated: use LookupPath. At some point before v1.0.0, this method will
+// be removed to be reused eventually for looking up a selector.
+func (v hiddenValue) Lookup(path ...string) Value {
+ ctx := v.ctx()
+ for _, k := range path {
+ // TODO(eval) TODO(error): always search in full data and change error
+ // message if a field is found but is of the incorrect type.
+ obj, err := v.structValData(ctx)
+ if err != nil {
+ // TODO: return a Value at the same location and a new error?
+ return newErrValue(v, err)
+ }
+ v = obj.Lookup(k)
+ }
+ return v
+}
+
+// Path returns the path to this value from the root of an Instance.
+//
+// This is currently only defined for values that have a fixed path within
+// a configuration, and thus not those that are derived from Elem, Template,
+// or programmatically generated values such as those returned by Unify.
+func (v Value) Path() Path {
+ if v.v == nil {
+ return Path{}
+ }
+ return Path{path: appendPath(nil, v)}
+}
+
+// appendPath computes the sequence of selectors leading from the root of the
+// instance to this value's Vertex, appending them to a.
+func appendPath(a []Selector, v Value) []Selector {
+ // Recurse to the root first so selectors end up in root-to-leaf order.
+ if p := v.parent(); p.v != nil {
+ a = appendPath(a, p)
+ }
+
+ if v.v.Label == 0 {
+ // A Label may be 0 for programmatically inserted nodes.
+ return a
+ }
+
+ f := v.v.Label
+ if index := f.Index(); index == adt.MaxIndex {
+ return append(a, Selector{anySelector(f)})
+ }
+
+ var sel selector
+ switch f.Typ() {
+ case adt.IntLabel:
+ sel = indexSelector(f)
+ case adt.DefinitionLabel:
+ sel = definitionSelector(f.SelectorString(v.idx))
+
+ case adt.HiddenDefinitionLabel, adt.HiddenLabel:
+ sel = scopedSelector{
+ name: f.IdentString(v.idx),
+ pkg: f.PkgID(v.idx),
+ }
+
+ case adt.StringLabel:
+ sel = stringSelector(f.StringValue(v.idx))
+ }
+ return append(a, Selector{sel})
+}
+
+// LookupDef is equal to LookupPath(MakePath(Def(name))).
+//
+// Deprecated: use LookupPath.
+func (v hiddenValue) LookupDef(name string) Value {
+ return v.LookupPath(MakePath(Def(name)))
+}
+
+// errNotFound is the sentinel returned by field lookups that find no match.
+var errNotFound = errors.Newf(token.NoPos, "field not found")
+
+// FieldByName looks up a field for the given name. If isIdent is true, it will
+// look up a definition or hidden field (starting with `_` or `_#`). Otherwise
+// it interprets name as an arbitrary string for a regular field.
+//
+// Deprecated: use LookupPath.
+func (v hiddenValue) FieldByName(name string, isIdent bool) (f FieldInfo, err error) {
+ s, err := v.Struct()
+ if err != nil {
+ return f, err
+ }
+ return s.FieldByName(name, isIdent)
+}
+
+// LookupField reports information about a field of v. Hidden fields are
+// treated as not found.
+//
+// Deprecated: use LookupPath
+func (v hiddenValue) LookupField(name string) (FieldInfo, error) {
+ s, err := v.Struct()
+ if err != nil {
+ // TODO: return a Value at the same location and a new error?
+ return FieldInfo{}, err
+ }
+ f, err := s.FieldByName(name, true)
+ if err != nil {
+ return f, err
+ }
+ if f.IsHidden {
+ return f, errNotFound
+ }
+ return f, err
+}
+
+// TODO: expose this API?
+//
+// // EvalExpr evaluates an expression within the scope of v, which must be
+// // a struct.
+// //
+// // Expressions may refer to builtin packages if they can be uniquely identified.
+// func (v Value) EvalExpr(expr ast.Expr) Value {
+// ctx := v.ctx()
+// result := evalExpr(ctx, v.eval(ctx), expr)
+// return newValueRoot(ctx, result)
+// }
+
+// Fill creates a new value by unifying v with the value of x at the given path.
+//
+// Values may be any Go value that can be converted to CUE, an ast.Expr or
+// a Value. In the latter case, it will panic if the Value is not from the same
+// Runtime.
+//
+// Any reference in v referring to the value at the given path will resolve
+// to x in the newly created value. The resulting value is not validated.
+//
+// Deprecated: use FillPath.
+func (v hiddenValue) Fill(x interface{}, path ...string) Value {
+ if v.v == nil {
+ return v
+ }
+ // Convert the string path elements to selectors and delegate to FillPath.
+ selectors := make([]Selector, len(path))
+ for i, p := range path {
+ selectors[i] = Str(p)
+ }
+ return v.FillPath(MakePath(selectors...), x)
+}
+
+// FillPath creates a new value by unifying v with the value of x at the given
+// path.
+//
+// If x is an cue/ast.Expr, it will be evaluated within the context of the
+// given path: identifiers that are not resolved within the expression are
+// resolved as if they were defined at the path position.
+//
+// If x is a Value, it will be used as is. It panics if x is not created
+// from the same Runtime as v.
+//
+// Otherwise, the given Go value will be converted to CUE using the same rules
+// as Context.Encode.
+//
+// Any reference in v referring to the value at the given path will resolve to x
+// in the newly created value. The resulting value is not validated.
+//
+func (v Value) FillPath(p Path, x interface{}) Value {
+ if v.v == nil {
+ // TODO: panic here?
+ return v
+ }
+ ctx := v.ctx()
+ if err := p.Err(); err != nil {
+ return newErrValue(v, mkErr(v.idx, nil, 0, "invalid path: %v", err))
+ }
+ var expr adt.Expr
+ switch x := x.(type) {
+ case Value:
+ if v.idx != x.idx {
+ panic("values are not from the same runtime")
+ }
+ expr = x.v
+ case ast.Expr:
+ n := getScopePrefix(v, p)
+ // TODO: inject import path of current package?
+ expr = resolveExpr(ctx, n, x)
+ default:
+ expr = convert.GoValueToValue(ctx, x, true)
+ }
+ // Wrap expr in one struct/list layer per path element, innermost first.
+ for i := len(p.path) - 1; i >= 0; i-- {
+ switch sel := p.path[i].sel; {
+ case sel == AnyString.sel:
+ expr = &adt.StructLit{Decls: []adt.Decl{
+ &adt.BulkOptionalField{
+ Filter: &adt.BasicType{K: adt.StringKind},
+ Value: expr,
+ },
+ }}
+
+ case sel == anyIndex.sel:
+ expr = &adt.ListLit{Elems: []adt.Elem{
+ &adt.Ellipsis{Value: expr},
+ }}
+
+ case sel == anyDefinition.sel:
+ expr = &adt.Bottom{Err: errors.Newf(token.NoPos,
+ "AnyDefinition not supported")}
+
+ case sel.kind() == adt.IntLabel:
+ // Build a list with `any` padding up to the index, then expr.
+ i := sel.feature(ctx.Runtime).Index()
+ list := &adt.ListLit{}
+ any := &adt.Top{}
+ // TODO(perf): make this a constant thing. This will be possible with the query extension.
+ for k := 0; k < i; k++ {
+ list.Elems = append(list.Elems, any)
+ }
+ list.Elems = append(list.Elems, expr, &adt.Ellipsis{})
+ expr = list
+
+ default:
+ var d adt.Decl
+ if sel.optional() {
+ d = &adt.OptionalField{
+ Label: sel.feature(v.idx),
+ Value: expr,
+ }
+ } else {
+ d = &adt.Field{
+ Label: sel.feature(v.idx),
+ Value: expr,
+ }
+ }
+ expr = &adt.StructLit{Decls: []adt.Decl{d}}
+ }
+ }
+ n := &adt.Vertex{}
+ n.AddConjunct(adt.MakeRootConjunct(nil, expr))
+ n.Finalize(ctx)
+ w := makeValue(v.idx, n, v.parent_)
+ return v.Unify(w)
+}
+
+// Template returns a function that represents the template definition for a
+// struct in a configuration file. It returns nil if v is not a struct kind or
+// if there is no template associated with the struct.
+//
+// The returned function returns the value that would be unified with field
+// given its name.
+//
+// Deprecated: use LookupPath in combination with using optional selectors.
+func (v hiddenValue) Template() func(label string) Value {
+ if v.v == nil {
+ return nil
+ }
+
+ // Only structs with pattern or additional constraints have a template.
+ types := v.v.OptionalTypes()
+ if types&(adt.HasAdditional|adt.HasPattern) == 0 {
+ return nil
+ }
+
+ return func(label string) Value {
+ return v.LookupPath(MakePath(Str(label).Optional()))
+ }
+}
+
+// Subsume reports nil when w is an instance of v or an error otherwise.
+//
+// Without options, the entire value is considered for assumption, which means
+// Subsume tests whether v is a backwards compatible (newer) API version of w.
+//
+// Use the Final option to check subsumption if a w is known to be final, and
+// should be assumed to be closed.
+//
+// Use the Raw option to do a low-level subsumption, taking defaults into
+// account.
+//
+// Value v and w must be obtained from the same build. TODO: remove this
+// requirement.
+func (v Value) Subsume(w Value, opts ...Option) error {
+ // Select the subsumption profile according to the options.
+ o := getOptions(opts)
+ p := subsume.CUE
+ switch {
+ case o.final && o.ignoreClosedness:
+ p = subsume.FinalOpen
+ case o.final:
+ p = subsume.Final
+ case o.ignoreClosedness:
+ p = subsume.API
+ }
+ if !o.raw {
+ p.Defaults = true
+ }
+ ctx := v.ctx()
+ return p.Value(ctx, v.v, w.v)
+}
+
+// Deprecated: use Subsume.
+//
+// Subsumes reports whether w is an instance of v.
+//
+// Without options, Subsumes checks whether v is a backwards compatible schema
+// of w.
+//
+// By default, Subsumes tests whether two values are compatible
+// Value v and w must be obtained from the same build.
+// TODO: remove this requirement.
+func (v hiddenValue) Subsumes(w Value) bool {
+ ctx := v.ctx()
+ p := subsume.Profile{Defaults: true}
+ return p.Check(ctx, v.v, w.v)
+}
+
+// allowed reports an error if n contains an arc whose label is not accepted
+// by the (closed) parent. Open parents accept everything.
+func allowed(ctx *adt.OpContext, parent, n *adt.Vertex) *adt.Bottom {
+ if !parent.IsClosedList() && !parent.IsClosedStruct() {
+ return nil
+ }
+
+ for _, a := range n.Arcs {
+ if !parent.Accept(ctx, a.Label) {
+ defer ctx.PopArc(ctx.PushArc(parent))
+ label := a.Label.SelectorString(ctx)
+ // NOTE(review): Accept is called a second time here, presumably
+ // for its side effects on error positioning — confirm upstream.
+ parent.Accept(ctx, a.Label)
+ return ctx.NewErrf("field not allowed: %s", label)
+ }
+ }
+ return nil
+}
+
+// addConjuncts adds src as a conjunct of dst, propagating src's closedness
+// information when present.
+func addConjuncts(dst, src *adt.Vertex) {
+ c := adt.MakeRootConjunct(nil, src)
+ if src.Closed {
+ var root adt.CloseInfo
+ c.CloseInfo = root.SpawnRef(src, src.Closed, nil)
+ }
+ dst.AddConjunct(c)
+}
+
+// Unify reports the greatest lower bound of v and w.
+//
+// Value v and w must be obtained from the same build.
+// TODO: remove this requirement.
+func (v Value) Unify(w Value) Value {
+ // Unifying with a non-existing value is a no-op.
+ if v.v == nil {
+ return w
+ }
+ if w.v == nil || w.v == v.v {
+ return v
+ }
+
+ n := &adt.Vertex{}
+ addConjuncts(n, v.v)
+ addConjuncts(n, w.v)
+
+ ctx := newContext(v.idx)
+ n.Finalize(ctx)
+
+ n.Parent = v.v.Parent
+ n.Label = v.v.Label
+ n.Closed = v.v.Closed || w.v.Closed
+
+ if err := n.Err(ctx, adt.Finalized); err != nil {
+ return makeValue(v.idx, n, v.parent_)
+ }
+ // Check closedness of the result against both operands.
+ if err := allowed(ctx, v.v, n); err != nil {
+ return newErrValue(w, err)
+ }
+ if err := allowed(ctx, w.v, n); err != nil {
+ return newErrValue(v, err)
+ }
+
+ return makeValue(v.idx, n, v.parent_)
+}
+
+// UnifyAccept is as v.Unify(w), except that closedness of the result is
+// checked against the Value accept instead of against v and w themselves.
+func (v Value) UnifyAccept(w Value, accept Value) Value {
+ if v.v == nil {
+ return w
+ }
+ if w.v == nil {
+ return v
+ }
+ if accept.v == nil {
+ panic("accept must exist")
+ }
+
+ n := &adt.Vertex{}
+ n.AddConjunct(adt.MakeRootConjunct(nil, v.v))
+ n.AddConjunct(adt.MakeRootConjunct(nil, w.v))
+
+ ctx := newContext(v.idx)
+ n.Finalize(ctx)
+
+ n.Parent = v.v.Parent
+ n.Label = v.v.Label
+
+ if err := n.Err(ctx, adt.Finalized); err != nil {
+ return makeValue(v.idx, n, v.parent_)
+ }
+ // Fields of the result must be allowed by accept.
+ if err := allowed(ctx, accept.v, n); err != nil {
+ return newErrValue(accept, err)
+ }
+
+ return makeValue(v.idx, n, v.parent_)
+}
+
+// Equals reports whether two values are equal, ignoring optional fields.
+// The result is undefined for incomplete values.
+func (v Value) Equals(other Value) bool {
+ if v.v == nil || other.v == nil {
+ return false
+ }
+ return adt.Equal(v.ctx(), v.v, other.v, 0)
+}
+
+// instance returns the Instance this value belongs to, or nil if it does not
+// exist.
+func (v Value) instance() *Instance {
+ if v.v == nil {
+ return nil
+ }
+ return getImportFromNode(v.idx, v.v)
+}
+
+// Reference returns the instance and path referred to by this value such that
+// inst.Lookup(path) resolves to the same value, or no path if this value is not
+// a reference. If a reference contains index selection (foo[bar]), it will
+// only return a reference if the index resolves to a concrete value.
+//
+// Deprecated: use ReferencePath
+func (v hiddenValue) Reference() (inst *Instance, path []string) {
+ root, p := v.ReferencePath()
+ if !root.Exists() {
+ return nil, nil
+ }
+
+ inst = getImportFromNode(v.idx, root.v)
+ // Flatten the selectors to their legacy string representation.
+ for _, sel := range p.Selectors() {
+ switch x := sel.sel.(type) {
+ case stringSelector:
+ path = append(path, string(x))
+ default:
+ path = append(path, sel.String())
+ }
+ }
+
+ return inst, path
+}
+
+// ReferencePath returns the value and path referred to by this value such that
+// value.LookupPath(path) resolves to the same value, or no path if this value
+// is not a reference.
+func (v Value) ReferencePath() (root Value, p Path) {
+ // TODO: don't include references to hidden fields.
+ // Only single-conjunct values can be a pure reference.
+ if v.v == nil || len(v.v.Conjuncts) != 1 {
+ return Value{}, Path{}
+ }
+ ctx := v.ctx()
+ c := v.v.Conjuncts[0]
+
+ x, path := reference(v.idx, ctx, c.Env, c.Expr())
+ if x == nil {
+ return Value{}, Path{}
+ }
+ // NOTE: due to structure sharing, the path of the referred node may end
+ // up different from the one explicitly pointed to. The value will be the
+ // same, but the scope may differ.
+ // TODO(structureshare): see if we can construct the original path. This
+ // only has to be done if structures are being shared.
+ return makeValue(v.idx, x, nil), Path{path: path}
+}
+
+// reference resolves expression r within env to the root vertex it refers to
+// and the selector path from that root, or (nil, nil) if r is not a
+// reference expression.
+func reference(rt *runtime.Runtime, c *adt.OpContext, env *adt.Environment, r adt.Expr) (inst *adt.Vertex, path []Selector) {
+ ctx := c
+ defer ctx.PopState(ctx.PushState(env, r.Source()))
+
+ switch x := r.(type) {
+ // TODO: do we need to handle Vertex as well, in case this is hard-wired?
+ // Probably not, as this results from dynamic content.
+
+ case *adt.NodeLink:
+ // TODO: consider getting rid of NodeLink.
+ inst, path = mkPath(rt, nil, x.Node)
+
+ case *adt.FieldReference:
+ env := ctx.Env(x.UpCount)
+ inst, path = mkPath(rt, nil, env.Vertex)
+ path = appendSelector(path, featureToSel(x.Label, rt))
+
+ case *adt.LabelReference:
+ env := ctx.Env(x.UpCount)
+ return mkPath(rt, nil, env.Vertex)
+
+ case *adt.DynamicReference:
+ env := ctx.Env(x.UpCount)
+ inst, path = mkPath(rt, nil, env.Vertex)
+ v, _ := ctx.Evaluate(env, x.Label)
+ path = appendSelector(path, valueToSel(v))
+
+ case *adt.ImportReference:
+ inst = rt.LoadImport(rt.LabelStr(x.ImportPath))
+
+ case *adt.SelectorExpr:
+ // Resolve the subject recursively, then append the selector.
+ inst, path = reference(rt, c, env, x.X)
+ path = appendSelector(path, featureToSel(x.Sel, rt))
+
+ case *adt.IndexExpr:
+ inst, path = reference(rt, c, env, x.X)
+ v, _ := ctx.Evaluate(env, x.Index)
+ path = appendSelector(path, valueToSel(v))
+ }
+ if inst == nil {
+ return nil, nil
+ }
+ return inst, path
+}
+
+// mkPath walks up v's parents to find the root vertex and returns it together
+// with the selector path from the root down to v.
+func mkPath(r *runtime.Runtime, a []Selector, v *adt.Vertex) (root *adt.Vertex, path []Selector) {
+ if v.Parent == nil {
+ return v, a
+ }
+ root, path = mkPath(r, a, v.Parent)
+ path = appendSelector(path, featureToSel(v.Label, r))
+ return root, path
+}
+
+// options holds the accumulated effect of the Option values passed to
+// methods such as Fields, Validate, and Syntax.
+type options struct {
+ concrete bool // enforce that values are concrete
+ raw bool // show original values
+ hasHidden bool
+ omitHidden bool
+ omitDefinitions bool
+ omitOptional bool
+ omitAttrs bool
+ resolveReferences bool
+ showErrors bool
+ final bool
+ ignoreClosedness bool // used for comparing APIs
+ docs bool
+ disallowCycles bool // implied by concrete
+ allowScalar bool
+}
+
+// An Option defines modes of evaluation.
+type Option option
+
+// option is the internal function type backing Option; each option mutates
+// the options struct in place.
+type option func(p *options)
+
+// Final indicates a value is final. It implicitly closes all structs and lists
+// in a value and selects defaults.
+func Final() Option {
+ return func(o *options) {
+ o.final = true
+ o.omitDefinitions = true
+ o.omitOptional = true
+ o.omitHidden = true
+ }
+}
+
+// Schema specifies the input is a Schema. Used by Subsume.
+func Schema() Option {
+ return func(o *options) {
+ o.ignoreClosedness = true
+ }
+}
+
+// Concrete ensures that all values are concrete.
+//
+// For Validate this means it returns an error if this is not the case.
+// In other cases a non-concrete value will be replaced with an error.
+func Concrete(concrete bool) Option {
+ return func(p *options) {
+ if concrete {
+ p.concrete = true
+ p.final = true
+ // Do not override an explicit Hidden/Definitions choice.
+ if !p.hasHidden {
+ p.omitHidden = true
+ p.omitDefinitions = true
+ }
+ }
+ }
+}
+
+// DisallowCycles forces validation in the presence of cycles, even if
+// non-concrete values are allowed. This is implied by Concrete(true).
+func DisallowCycles(disallow bool) Option {
+ return func(p *options) { p.disallowCycles = disallow }
+}
+
+// ResolveReferences forces the evaluation of references when outputting.
+// This implies the input cannot have cycles.
+func ResolveReferences(resolve bool) Option {
+ return func(p *options) {
+ p.resolveReferences = resolve
+
+ // ResolveReferences is implemented as a Value printer, rather than
+ // a definition printer, even though it should be more like the latter.
+ // To reflect this we convert incomplete errors to their original
+ // expression.
+ //
+ // TODO: ShowErrors mostly shows incomplete errors, even though this is
+ // just an approximation. There seems to be some inconsistencies as to
+ // when child errors are marked as such, making the conversion somewhat
+ // inconsistent. This option is conservative, though.
+ p.showErrors = true
+ }
+}
+
+// Raw tells Syntax to generate the value as is without any simplifications.
+func Raw() Option {
+ return func(p *options) { p.raw = true }
+}
+
+// All indicates that all fields and values should be included in processing
+// even if they can be elided or omitted.
+func All() Option {
+ return func(p *options) {
+ p.omitAttrs = false
+ p.omitHidden = false
+ p.omitDefinitions = false
+ p.omitOptional = false
+ }
+}
+
+// Docs indicates whether docs should be included.
+//
+// NOTE(review): the include argument is ignored and docs is always set to
+// true; confirm against upstream whether this is intentional.
+func Docs(include bool) Option {
+ return func(p *options) { p.docs = true }
+}
+
+// Definitions indicates whether definitions should be included.
+//
+// Definitions may still be included for certain functions if they are referred
+// to by other values.
+func Definitions(include bool) Option {
+ return func(p *options) {
+ p.hasHidden = true
+ p.omitDefinitions = !include
+ }
+}
+
+// Hidden indicates that definitions and hidden fields should be included.
+func Hidden(include bool) Option {
+ return func(p *options) {
+ p.hasHidden = true
+ p.omitHidden = !include
+ p.omitDefinitions = !include
+ }
+}
+
+// Optional indicates that optional fields should be included.
+func Optional(include bool) Option {
+ return func(p *options) { p.omitOptional = !include }
+}
+
+// Attributes indicates that attributes should be included.
+func Attributes(include bool) Option {
+ return func(p *options) { p.omitAttrs = !include }
+}
+
+// getOptions folds a list of Options into a single options struct.
+func getOptions(opts []Option) (o options) {
+ o.updateOptions(opts)
+ return
+}
+
+// updateOptions applies each Option to o in order.
+func (o *options) updateOptions(opts []Option) {
+ for _, fn := range opts {
+ fn(o)
+ }
+}
+
+// Validate reports any errors, recursively. The returned error may represent
+// more than one error, retrievable with errors.Errors, if more than one
+// exists.
+func (v Value) Validate(opts ...Option) error {
+ o := options{}
+ o.updateOptions(opts)
+
+ cfg := &validate.Config{
+ Concrete: o.concrete,
+ DisallowCycles: o.disallowCycles,
+ AllErrors: true,
+ }
+
+ b := validate.Validate(v.ctx(), v.v, cfg)
+ if b != nil {
+ return b.Err
+ }
+ return nil
+}
+
+// Walk descends into all values of v, calling f. If before returns false, Walk
+// will not descend further. It only visits values that are part of the data
+// model, so this excludes optional fields, hidden fields, and definitions.
+func (v Value) Walk(before func(Value) bool, after func(Value)) {
+ ctx := v.ctx()
+ switch v.Kind() {
+ case StructKind:
+ if before != nil && !before(v) {
+ return
+ }
+ obj, _ := v.structValData(ctx)
+ for i := 0; i < obj.Len(); i++ {
+ _, v := obj.At(i)
+ v.Walk(before, after)
+ }
+ case ListKind:
+ if before != nil && !before(v) {
+ return
+ }
+ list, _ := v.List()
+ for list.Next() {
+ list.Value().Walk(before, after)
+ }
+ default:
+ // Scalars have no children; only the callbacks are invoked.
+ if before != nil {
+ before(v)
+ }
+ }
+ if after != nil {
+ after(v)
+ }
+}
+
+// Expr reports the operation of the underlying expression and the values it
+// operates on.
+//
+// For unary expressions, it returns the single value of the expression.
+//
+// For binary expressions it returns first the left and right value, in that
+// order. For associative operations however, (for instance '&' and '|'), it may
+// return more than two values, where the operation is to be applied in
+// sequence.
+//
+// For selector and index expressions it returns the subject and then the index.
+// For selectors, the index is the string value of the identifier.
+//
+// For interpolations it returns a sequence of values to be concatenated, some
+// of which will be literal strings and some unevaluated expressions.
+//
+// A builtin call expression returns the value of the builtin followed by the
+// args of the call.
+func (v Value) Expr() (Op, []Value) {
+ // TODO: return v if this is complete? Yes for now
+ if v.v == nil {
+ return NoOp, nil
+ }
+
+ var expr adt.Expr
+ var env *adt.Environment
+
+ if v.v.IsData() {
+ expr = v.v.Value()
+
+ } else {
+ switch len(v.v.Conjuncts) {
+ case 0:
+ if v.v.BaseValue == nil {
+ return NoOp, []Value{makeValue(v.idx, v.v, v.parent_)} // TODO: v?
+ }
+ expr = v.v.Value()
+
+ case 1:
+ // the default case, processed below.
+ c := v.v.Conjuncts[0]
+ env = c.Env
+ expr = c.Expr()
+ if w, ok := expr.(*adt.Vertex); ok {
+ return Value{v.idx, w, v.parent_}.Expr()
+ }
+
+ default:
+ // Multiple conjuncts: report an AndOp over one value per conjunct.
+ a := []Value{}
+ ctx := v.ctx()
+ for _, c := range v.v.Conjuncts {
+ // Keep parent here. TODO: do we need to remove the requirement
+ // from other conjuncts?
+ n := &adt.Vertex{
+ Parent: v.v.Parent,
+ Label: v.v.Label,
+ }
+ n.AddConjunct(c)
+ n.Finalize(ctx)
+ a = append(a, makeValue(v.idx, n, v.parent_))
+ }
+ return adt.AndOp, a
+ }
+ }
+
+ // TODO: replace appends with []Value{}. For now, leave.
+ a := []Value{}
+ op := NoOp
+ switch x := expr.(type) {
+ case *adt.BinaryExpr:
+ a = append(a, remakeValue(v, env, x.X))
+ a = append(a, remakeValue(v, env, x.Y))
+ op = x.Op
+ case *adt.UnaryExpr:
+ a = append(a, remakeValue(v, env, x.X))
+ op = x.Op
+ case *adt.BoundExpr:
+ a = append(a, remakeValue(v, env, x.Expr))
+ op = x.Op
+ case *adt.BoundValue:
+ a = append(a, remakeValue(v, env, x.Value))
+ op = x.Op
+ case *adt.Conjunction:
+ // pre-expanded unification
+ for _, conjunct := range x.Values {
+ a = append(a, remakeValue(v, env, conjunct))
+ }
+ op = AndOp
+ case *adt.Disjunction:
+ // Omit defaults that are subsumed by a non-default disjunct.
+ count := 0
+ outer:
+ for i, disjunct := range x.Values {
+ if i < x.NumDefaults {
+ for _, n := range x.Values[x.NumDefaults:] {
+ if subsume.Simplify.Value(v.ctx(), n, disjunct) == nil {
+ continue outer
+ }
+ }
+ }
+ count++
+ a = append(a, remakeValue(v, env, disjunct))
+ }
+ if count > 1 {
+ op = OrOp
+ }
+
+ case *adt.DisjunctionExpr:
+ // Filter defaults that are subsumed by another value.
+ count := 0
+ outerExpr:
+ for _, disjunct := range x.Values {
+ if disjunct.Default {
+ for _, n := range x.Values {
+ a := adt.Vertex{
+ Label: v.v.Label,
+ }
+ b := a
+ a.AddConjunct(adt.MakeRootConjunct(env, n.Val))
+ b.AddConjunct(adt.MakeRootConjunct(env, disjunct.Val))
+
+ ctx := eval.NewContext(v.idx, nil)
+ ctx.Unify(&a, adt.Finalized)
+ ctx.Unify(&b, adt.Finalized)
+ if allowed(ctx, v.v, &b) != nil {
+ // Everything subsumed bottom
+ continue outerExpr
+ }
+ if allowed(ctx, v.v, &a) != nil {
+ // An error doesn't subsume anything except another error.
+ continue
+ }
+ a.Parent = v.v.Parent
+ if !n.Default && subsume.Simplify.Value(ctx, &a, &b) == nil {
+ continue outerExpr
+ }
+ }
+ }
+ count++
+ a = append(a, remakeValue(v, env, disjunct.Val))
+ }
+ if count > 1 {
+ op = adt.OrOp
+ }
+
+ case *adt.Interpolation:
+ for _, p := range x.Parts {
+ a = append(a, remakeValue(v, env, p))
+ }
+ op = InterpolationOp
+
+ case *adt.FieldReference:
+ // TODO: allow hard link
+ ctx := v.ctx()
+ f := ctx.PushState(env, x.Src)
+ env := ctx.Env(x.UpCount)
+ a = append(a, remakeValue(v, nil, &adt.NodeLink{Node: env.Vertex}))
+ a = append(a, remakeValue(v, nil, ctx.NewString(x.Label.SelectorString(ctx))))
+ _ = ctx.PopState(f)
+ op = SelectorOp
+
+ case *adt.SelectorExpr:
+ a = append(a, remakeValue(v, env, x.X))
+ // A string selector is quoted.
+ a = append(a, remakeValue(v, env, &adt.String{
+ Str: x.Sel.SelectorString(v.idx),
+ }))
+ op = SelectorOp
+
+ case *adt.IndexExpr:
+ a = append(a, remakeValue(v, env, x.X))
+ a = append(a, remakeValue(v, env, x.Index))
+ op = IndexOp
+ case *adt.SliceExpr:
+ a = append(a, remakeValue(v, env, x.X))
+ a = append(a, remakeValue(v, env, x.Lo))
+ a = append(a, remakeValue(v, env, x.Hi))
+ op = SliceOp
+ case *adt.CallExpr:
+ // Interpret "and" and "or" builtin semantically.
+ if fn, ok := x.Fun.(*adt.Builtin); ok && len(x.Args) == 1 &&
+ (fn.Name == "or" || fn.Name == "and") {
+
+ iter, _ := remakeValue(v, env, x.Args[0]).List()
+ for iter.Next() {
+ a = append(a, iter.Value())
+ }
+
+ op = OrOp
+ if fn.Name == "and" {
+ op = AndOp
+ }
+
+ if len(a) == 0 {
+ // Mimic semantics of builtin.
+ switch op {
+ case AndOp:
+ a = append(a, remakeValue(v, env, &adt.Top{}))
+ case OrOp:
+ a = append(a, remakeValue(v, env, &adt.Bottom{
+ Code: adt.IncompleteError,
+ Err: errors.Newf(x.Src.Fun.Pos(), "empty list in call to or"),
+ }))
+ }
+ op = NoOp
+ }
+ break
+ }
+ a = append(a, remakeValue(v, env, x.Fun))
+ for _, arg := range x.Args {
+ a = append(a, remakeValue(v, env, arg))
+ }
+ op = CallOp
+ case *adt.BuiltinValidator:
+ a = append(a, remakeValue(v, env, x.Builtin))
+ for _, arg := range x.Args {
+ a = append(a, remakeValue(v, env, arg))
+ }
+ op = CallOp
+
+ case *adt.StructLit:
+ // Split declarations into plain fields and embedded expressions.
+ hasEmbed := false
+ fields := []adt.Decl{}
+ for _, d := range x.Decls {
+ switch d.(type) {
+ default:
+ fields = append(fields, d)
+ case adt.Value:
+ fields = append(fields, d)
+ case adt.Expr:
+ hasEmbed = true
+ }
+ }
+
+ if !hasEmbed {
+ a = append(a, v)
+ break
+ }
+
+ ctx := v.ctx()
+
+ n := v.v
+
+ if len(fields) > 0 {
+ n = &adt.Vertex{
+ Parent: v.v.Parent,
+ Label: v.v.Label,
+ }
+
+ s := &adt.StructLit{}
+ if k := v.v.Kind(); k != adt.StructKind && k != BottomKind {
+ // TODO: we should also add such a declaration for embeddings
+ // of structs with definitions. However, this is currently
+ // also not supported at the CUE level. If we do, it may be
+ // best handled with a special mode of unification.
+ s.Decls = append(s.Decls, &adt.BasicType{K: k})
+ }
+ s.Decls = append(s.Decls, fields...)
+ c := adt.MakeRootConjunct(env, s)
+ n.AddConjunct(c)
+ n.Finalize(ctx)
+ n.Parent = v.v.Parent
+ }
+
+ // Simulate old embeddings.
+ envEmbed := &adt.Environment{
+ Up: env,
+ Vertex: n,
+ }
+
+ for _, d := range x.Decls {
+ switch x := d.(type) {
+ case adt.Value:
+ case adt.Expr:
+ // embedding
+ n := &adt.Vertex{Label: v.v.Label}
+ c := adt.MakeRootConjunct(envEmbed, x)
+ n.AddConjunct(c)
+ n.Finalize(ctx)
+ n.Parent = v.v.Parent
+ a = append(a, makeValue(v.idx, n, v.parent_))
+ }
+ }
+
+ // Could be done earlier, but keep struct with fields at end.
+ if len(fields) > 0 {
+ a = append(a, makeValue(v.idx, n, v.parent_))
+ }
+
+ if len(a) == 1 {
+ return a[0].Expr()
+ }
+ op = adt.AndOp
+
+ default:
+ a = append(a, v)
+ }
+ return op, a
+}
diff --git a/vendor/cuelang.org/go/encoding/json/json.go b/vendor/cuelang.org/go/encoding/json/json.go
new file mode 100644
index 0000000000..0e2c7f7cf1
--- /dev/null
+++ b/vendor/cuelang.org/go/encoding/json/json.go
@@ -0,0 +1,265 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package json converts JSON to and from CUE.
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/value"
+)
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(b []byte) bool {
+ return json.Valid(b)
+}
+
+// Validate validates JSON and confirms it matches the constraints
+// specified by v.
+func Validate(b []byte, v cue.Value) error {
+ if !json.Valid(b) {
+ return fmt.Errorf("json: invalid JSON")
+ }
+ r := value.ConvertToRuntime(v.Context())
+ inst, err := r.Compile("json.Validate", b)
+ if err != nil {
+ return err
+ }
+
+ v = v.Unify(inst.Value())
+ if v.Err() != nil {
+ return v.Err()
+ }
+ return nil
+}
+
+// Extract parses JSON-encoded data to a CUE expression, using path for
+// position information.
+func Extract(path string, data []byte) (ast.Expr, error) {
+ expr, err := extract(path, data)
+ if err != nil {
+ return nil, err
+ }
+ patchExpr(expr)
+ return expr, nil
+}
+
+// Decode parses JSON-encoded data to a CUE value, using path for position
+// information.
+//
+// Deprecated: use Extract and build using cue.Context.BuildExpr.
+func Decode(r *cue.Runtime, path string, data []byte) (*cue.Instance, error) {
+ expr, err := extract(path, data)
+ if err != nil {
+ return nil, err
+ }
+ return r.CompileExpr(expr)
+}
+
+func extract(path string, b []byte) (ast.Expr, error) {
+ expr, err := parser.ParseExpr(path, b)
+ if err != nil || !json.Valid(b) {
+ p := token.NoPos
+ if pos := errors.Positions(err); len(pos) > 0 {
+ p = pos[0]
+ }
+ var x interface{}
+ err := json.Unmarshal(b, &x)
+ return nil, errors.Wrapf(err, p, "invalid JSON for file %q", path)
+ }
+ return expr, nil
+}
+
+// NewDecoder configures a JSON decoder. The path is used to associate position
+// information with each node. The runtime may be nil if the decoder
+// is only used to extract to CUE ast objects.
+//
+// The runtime may be nil if Decode isn't used.
+func NewDecoder(r *cue.Runtime, path string, src io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ path: path,
+ dec: json.NewDecoder(src),
+ offset: 1,
+ }
+}
+
+// A Decoder converts JSON values to CUE.
+type Decoder struct {
+ r *cue.Runtime
+ path string
+ dec *json.Decoder
+ offset int
+}
+
+// Extract converts the current JSON value to a CUE ast. It returns io.EOF
+// if the input has been exhausted.
+func (d *Decoder) Extract() (ast.Expr, error) {
+ expr, err := d.extract()
+ if err != nil {
+ return expr, err
+ }
+ patchExpr(expr)
+ return expr, nil
+}
+
+func (d *Decoder) extract() (ast.Expr, error) {
+ var raw json.RawMessage
+ err := d.dec.Decode(&raw)
+ if err == io.EOF {
+ return nil, err
+ }
+ offset := d.offset
+ d.offset += len(raw)
+ if err != nil {
+ pos := token.NewFile(d.path, offset, len(raw)).Pos(0, 0)
+ return nil, errors.Wrapf(err, pos, "invalid JSON for file %q", d.path)
+ }
+ expr, err := parser.ParseExpr(d.path, []byte(raw), parser.FileOffset(offset))
+ if err != nil {
+ return nil, err
+ }
+ return expr, nil
+}
+
+// Decode converts the current JSON value to a CUE instance. It returns io.EOF
+// if the input has been exhausted.
+//
+// Deprecated: use Extract and build with cue.Context.BuildExpr.
+func (d *Decoder) Decode() (*cue.Instance, error) {
+ expr, err := d.Extract()
+ if err != nil {
+ return nil, err
+ }
+ return d.r.CompileExpr(expr)
+}
+
+// patchExpr simplifies the AST parsed from JSON.
+// TODO: some of the modifications are already done in format, but are
+// a package deal of a more aggressive simplify. Other pieces of modification
+// should probably be moved to format.
+func patchExpr(n ast.Node) {
+ type info struct {
+ reflow bool
+ }
+ stack := []info{{true}}
+
+ afterFn := func(n ast.Node) {
+ switch n.(type) {
+ case *ast.ListLit, *ast.StructLit:
+ stack = stack[:len(stack)-1]
+ }
+ }
+
+ var beforeFn func(n ast.Node) bool
+
+ beforeFn = func(n ast.Node) bool {
+ isLarge := n.End().Offset()-n.Pos().Offset() > 50
+ descent := true
+
+ switch x := n.(type) {
+ case *ast.ListLit:
+ reflow := true
+ if !isLarge {
+ for _, e := range x.Elts {
+ if hasSpaces(e) {
+ reflow = false
+ break
+ }
+ }
+ }
+ stack = append(stack, info{reflow})
+ if reflow {
+ x.Lbrack = x.Lbrack.WithRel(token.NoRelPos)
+ x.Rbrack = x.Rbrack.WithRel(token.NoRelPos)
+ }
+ return true
+
+ case *ast.StructLit:
+ reflow := true
+ if !isLarge {
+ for _, e := range x.Elts {
+ if f, ok := e.(*ast.Field); !ok || hasSpaces(f) || hasSpaces(f.Value) {
+ reflow = false
+ break
+ }
+ }
+ }
+ stack = append(stack, info{reflow})
+ if reflow {
+ x.Lbrace = x.Lbrace.WithRel(token.NoRelPos)
+ x.Rbrace = x.Rbrace.WithRel(token.NoRelPos)
+ }
+ return true
+
+ case *ast.Field:
+ // label is always a string for JSON.
+ switch {
+ case true:
+ s, ok := x.Label.(*ast.BasicLit)
+ if !ok || s.Kind != token.STRING {
+ break // should not happen: implies invalid JSON
+ }
+
+ u, err := literal.Unquote(s.Value)
+ if err != nil {
+ break // should not happen: implies invalid JSON
+ }
+
+ // TODO(legacy): remove checking for '_' prefix once hidden
+ // fields are removed.
+ if !ast.IsValidIdent(u) || strings.HasPrefix(u, "_") {
+ break // keep string
+ }
+
+ x.Label = ast.NewIdent(u)
+ astutil.CopyMeta(x.Label, s)
+ }
+ ast.Walk(x.Value, beforeFn, afterFn)
+ descent = false
+
+ case *ast.BasicLit:
+ if x.Kind == token.STRING && len(x.Value) > 10 {
+ s, err := literal.Unquote(x.Value)
+ if err != nil {
+ break // should not happen: implies invalid JSON
+ }
+
+ x.Value = literal.String.WithOptionalTabIndent(len(stack)).Quote(s)
+ }
+ }
+
+ if stack[len(stack)-1].reflow {
+ ast.SetRelPos(n, token.NoRelPos)
+ }
+ return descent
+ }
+
+ ast.Walk(n, beforeFn, afterFn)
+}
+
+func hasSpaces(n ast.Node) bool {
+ return n.Pos().RelPos() > token.NoSpace
+}
diff --git a/vendor/cuelang.org/go/internal/astinternal/debugstr.go b/vendor/cuelang.org/go/internal/astinternal/debugstr.go
new file mode 100644
index 0000000000..0121b458ea
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/astinternal/debugstr.go
@@ -0,0 +1,281 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package astinternal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+func DebugStr(x interface{}) (out string) {
+ if n, ok := x.(ast.Node); ok {
+ comments := ""
+ for _, g := range n.Comments() {
+ comments += DebugStr(g)
+ }
+ if comments != "" {
+ defer func() { out = "<" + comments + out + ">" }()
+ }
+ }
+ switch v := x.(type) {
+ case *ast.File:
+ out := ""
+ out += DebugStr(v.Decls)
+ return out
+
+ case *ast.Package:
+ out := "package "
+ out += DebugStr(v.Name)
+ return out
+
+ case *ast.LetClause:
+ out := "let "
+ out += DebugStr(v.Ident)
+ out += "="
+ out += DebugStr(v.Expr)
+ return out
+
+ case *ast.Alias:
+ out := DebugStr(v.Ident)
+ out += "="
+ out += DebugStr(v.Expr)
+ return out
+
+ case *ast.BottomLit:
+ return "_|_"
+
+ case *ast.BasicLit:
+ return v.Value
+
+ case *ast.Interpolation:
+ for _, e := range v.Elts {
+ out += DebugStr(e)
+ }
+ return out
+
+ case *ast.EmbedDecl:
+ out += DebugStr(v.Expr)
+ return out
+
+ case *ast.ImportDecl:
+ out := "import "
+ if v.Lparen != token.NoPos {
+ out += "( "
+ out += DebugStr(v.Specs)
+ out += " )"
+ } else {
+ out += DebugStr(v.Specs)
+ }
+ return out
+
+ case *ast.Comprehension:
+ out := DebugStr(v.Clauses)
+ out += DebugStr(v.Value)
+ return out
+
+ case *ast.StructLit:
+ out := "{"
+ out += DebugStr(v.Elts)
+ out += "}"
+ return out
+
+ case *ast.ListLit:
+ out := "["
+ out += DebugStr(v.Elts)
+ out += "]"
+ return out
+
+ case *ast.Ellipsis:
+ out := "..."
+ if v.Type != nil {
+ out += DebugStr(v.Type)
+ }
+ return out
+
+ case *ast.ForClause:
+ out := "for "
+ if v.Key != nil {
+ out += DebugStr(v.Key)
+ out += ": "
+ }
+ out += DebugStr(v.Value)
+ out += " in "
+ out += DebugStr(v.Source)
+ return out
+
+ case *ast.IfClause:
+ out := "if "
+ out += DebugStr(v.Condition)
+ return out
+
+ case *ast.Field:
+ out := DebugStr(v.Label)
+ if v.Optional != token.NoPos {
+ out += "?"
+ }
+ if v.Value != nil {
+ switch v.Token {
+ case token.ILLEGAL, token.COLON:
+ out += ": "
+ default:
+ out += fmt.Sprintf(" %s ", v.Token)
+ }
+ out += DebugStr(v.Value)
+ for _, a := range v.Attrs {
+ out += " "
+ out += DebugStr(a)
+ }
+ }
+ return out
+
+ case *ast.Attribute:
+ return v.Text
+
+ case *ast.Ident:
+ return v.Name
+
+ case *ast.SelectorExpr:
+ return DebugStr(v.X) + "." + DebugStr(v.Sel)
+
+ case *ast.CallExpr:
+ out := DebugStr(v.Fun)
+ out += "("
+ out += DebugStr(v.Args)
+ out += ")"
+ return out
+
+ case *ast.ParenExpr:
+ out := "("
+ out += DebugStr(v.X)
+ out += ")"
+ return out
+
+ case *ast.UnaryExpr:
+ return v.Op.String() + DebugStr(v.X)
+
+ case *ast.BinaryExpr:
+ out := DebugStr(v.X)
+ op := v.Op.String()
+ if 'a' <= op[0] && op[0] <= 'z' {
+ op = fmt.Sprintf(" %s ", op)
+ }
+ out += op
+ out += DebugStr(v.Y)
+ return out
+
+ case []*ast.CommentGroup:
+ var a []string
+ for _, c := range v {
+ a = append(a, DebugStr(c))
+ }
+ return strings.Join(a, "\n")
+
+ case *ast.CommentGroup:
+ str := "["
+ if v.Doc {
+ str += "d"
+ }
+ if v.Line {
+ str += "l"
+ }
+ str += strconv.Itoa(int(v.Position))
+ var a = []string{}
+ for _, c := range v.List {
+ a = append(a, c.Text)
+ }
+ return str + strings.Join(a, " ") + "] "
+
+ case *ast.IndexExpr:
+ out := DebugStr(v.X)
+ out += "["
+ out += DebugStr(v.Index)
+ out += "]"
+ return out
+
+ case *ast.SliceExpr:
+ out := DebugStr(v.X)
+ out += "["
+ out += DebugStr(v.Low)
+ out += ":"
+ out += DebugStr(v.High)
+ out += "]"
+ return out
+
+ case *ast.ImportSpec:
+ out := ""
+ if v.Name != nil {
+ out += DebugStr(v.Name)
+ out += " "
+ }
+ out += DebugStr(v.Path)
+ return out
+
+ case []ast.Decl:
+ if len(v) == 0 {
+ return ""
+ }
+ out := ""
+ for _, d := range v {
+ out += DebugStr(d)
+ out += sep
+ }
+ return out[:len(out)-len(sep)]
+
+ case []ast.Clause:
+ if len(v) == 0 {
+ return ""
+ }
+ out := ""
+ for _, c := range v {
+ out += DebugStr(c)
+ out += " "
+ }
+ return out
+
+ case []ast.Expr:
+ if len(v) == 0 {
+ return ""
+ }
+ out := ""
+ for _, d := range v {
+ out += DebugStr(d)
+ out += sep
+ }
+ return out[:len(out)-len(sep)]
+
+ case []*ast.ImportSpec:
+ if len(v) == 0 {
+ return ""
+ }
+ out := ""
+ for _, d := range v {
+ out += DebugStr(d)
+ out += sep
+ }
+ return out[:len(out)-len(sep)]
+
+ default:
+ if v == nil {
+ return ""
+ }
+ return fmt.Sprintf("<%T>", x)
+ }
+}
+
+const sep = ", "
diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go
new file mode 100644
index 0000000000..058948012f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/attrs.go
@@ -0,0 +1,252 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+)
+
+// AttrKind indicates the location of an attribute within CUE source.
+type AttrKind uint8
+
+const (
+ // FieldAttr indicates an attribute is a field attribute.
+ // foo: bar @attr()
+ FieldAttr AttrKind = 1 << iota
+
+ // DeclAttr indicates an attribute was specified at a declaration position.
+ // foo: {
+ // @attr()
+ // }
+ DeclAttr
+
+ // TODO: Possible future attr kinds
+ // ElemAttr
+ // FileAttr
+ // ValueAttr = FieldAttr|DeclAttr|ElemAttr
+)
+
+// Attr holds positional information for a single Attr.
+type Attr struct {
+ Name string // e.g. "json" or "protobuf"
+ Body string
+ Kind AttrKind
+ Fields []KeyValue
+ Err error
+}
+
+// NewNonExisting creates a non-existing attribute.
+func NewNonExisting(key string) Attr {
+ const msgNotExist = "attribute %q does not exist"
+ return Attr{Err: errors.Newf(token.NoPos, msgNotExist, key)}
+}
+
+type KeyValue struct {
+ data string
+ equal int // index of equal sign or 0 if non-existing
+}
+
+func (kv *KeyValue) Text() string { return kv.data }
+func (kv *KeyValue) Key() string {
+ if kv.equal == 0 {
+ return kv.data
+ }
+ s := kv.data[:kv.equal]
+ s = strings.TrimSpace(s)
+ return s
+}
+func (kv *KeyValue) Value() string {
+ if kv.equal == 0 {
+ return ""
+ }
+ return strings.TrimSpace(kv.data[kv.equal+1:])
+}
+
+func (a *Attr) hasPos(p int) error {
+ if a.Err != nil {
+ return a.Err
+ }
+ if p >= len(a.Fields) {
+ return fmt.Errorf("field does not exist")
+ }
+ return nil
+}
+
+// String reports the possibly empty string value at the given position or
+// an error the attribute is invalid or if the position does not exist.
+func (a *Attr) String(pos int) (string, error) {
+ if err := a.hasPos(pos); err != nil {
+ return "", err
+ }
+ return a.Fields[pos].Text(), nil
+}
+
+// Int reports the integer at the given position or an error if the attribute is
+// invalid, the position does not exist, or the value at the given position is
+// not an integer.
+func (a *Attr) Int(pos int) (int64, error) {
+ if err := a.hasPos(pos); err != nil {
+ return 0, err
+ }
+ // TODO: use CUE's literal parser once it exists, allowing any of CUE's
+ // number types.
+ return strconv.ParseInt(a.Fields[pos].Text(), 10, 64)
+}
+
+// Flag reports whether an entry with the given name exists at position pos or
+// onwards or an error if the attribute is invalid or if the first pos-1 entries
+// are not defined.
+func (a *Attr) Flag(pos int, key string) (bool, error) {
+ if err := a.hasPos(pos - 1); err != nil {
+ return false, err
+ }
+ for _, kv := range a.Fields[pos:] {
+ if kv.Text() == key {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Lookup searches for an entry of the form key=value from position pos onwards
+// and reports the value if found. It reports an error if the attribute is
+// invalid or if the first pos-1 entries are not defined.
+func (a *Attr) Lookup(pos int, key string) (val string, found bool, err error) {
+ if err := a.hasPos(pos - 1); err != nil {
+ return "", false, err
+ }
+ for _, kv := range a.Fields[pos:] {
+ if kv.Key() == key {
+ return kv.Value(), true, nil
+ }
+ }
+ return "", false, nil
+}
+
+func ParseAttrBody(pos token.Pos, s string) (a Attr) {
+ a.Body = s
+ i := 0
+ for {
+ i += skipSpace(s[i:])
+ // always scan at least one, possibly empty element.
+ n, err := scanAttributeElem(pos, s[i:], &a)
+ if err != nil {
+ return Attr{Err: err}
+ }
+ if i += n; i >= len(s) {
+ break
+ }
+ i += skipSpace(s[i:])
+ if s[i] != ',' {
+ return Attr{Err: errors.Newf(pos, "invalid attribute: expected comma")}
+ }
+ i++
+ }
+ return a
+}
+
+func skipSpace(s string) int {
+ for n, r := range s {
+ if !unicode.IsSpace(r) {
+ return n
+ }
+ }
+ return 0
+}
+
+func scanAttributeElem(pos token.Pos, s string, a *Attr) (n int, err errors.Error) {
+ // try CUE string
+ kv := KeyValue{}
+ if n, kv.data, err = scanAttributeString(pos, s); n == 0 {
+ // try key-value pair
+ p := strings.IndexAny(s, ",=") // ) is assumed to be stripped.
+ switch {
+ case p < 0:
+ kv.data = strings.TrimSpace(s)
+ n = len(s)
+
+ default: // ','
+ n = p
+ kv.data = strings.TrimSpace(s[:n])
+
+ case s[p] == '=':
+ kv.equal = p
+ offset := p + 1
+ offset += skipSpace(s[offset:])
+ var str string
+ if p, str, err = scanAttributeString(pos, s[offset:]); p > 0 {
+ n = offset + p
+ kv.data = s[:offset] + str
+ } else {
+ n = len(s)
+ if p = strings.IndexByte(s[offset:], ','); p >= 0 {
+ n = offset + p
+ }
+ kv.data = strings.TrimSpace(s[:n])
+ }
+ }
+ }
+ if a != nil {
+ a.Fields = append(a.Fields, kv)
+ }
+ return n, err
+}
+
+func scanAttributeString(pos token.Pos, s string) (n int, str string, err errors.Error) {
+ if s == "" || (s[0] != '#' && s[0] != '"' && s[0] != '\'') {
+ return 0, "", nil
+ }
+
+ nHash := 0
+ for {
+ if nHash < len(s) {
+ if s[nHash] == '#' {
+ nHash++
+ continue
+ }
+ if s[nHash] == '\'' || s[nHash] == '"' {
+ break
+ }
+ }
+ return nHash, s[:nHash], errors.Newf(pos, "invalid attribute string")
+ }
+
+ // Determine closing quote.
+ nQuote := 1
+ if c := s[nHash]; nHash+6 < len(s) && s[nHash+1] == c && s[nHash+2] == c {
+ nQuote = 3
+ }
+ close := s[nHash:nHash+nQuote] + s[:nHash]
+
+ // Search for closing quote.
+ index := strings.Index(s[len(close):], close)
+ if index == -1 {
+ return len(s), "", errors.Newf(pos, "attribute string not terminated")
+ }
+
+ index += 2 * len(close)
+ s, err2 := literal.Unquote(s[:index])
+ if err2 != nil {
+ return index, "", errors.Newf(pos, "invalid attribute string: %v", err2)
+ }
+ return index, s, nil
+}
diff --git a/vendor/cuelang.org/go/internal/cli/cli.go b/vendor/cuelang.org/go/internal/cli/cli.go
new file mode 100644
index 0000000000..f6ffd251fe
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cli/cli.go
@@ -0,0 +1,91 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cli
+
+import (
+ "strings"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+)
+
+func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs errors.Error) {
+ var expr ast.Expr
+
+ if k&cue.NumberKind != 0 {
+ var err error
+ expr, err = parser.ParseExpr(name, str)
+ if err != nil {
+ errs = errors.Wrapf(err, pos,
+ "invalid number for environment variable %s", name)
+ }
+ }
+
+ if k&cue.BoolKind != 0 {
+ str = strings.TrimSpace(str)
+ b, ok := boolValues[str]
+ if !ok {
+ errs = errors.Append(errs, errors.Newf(pos,
+ "invalid boolean value %q for environment variable %s", str, name))
+ } else if expr != nil || k&cue.StringKind != 0 {
+ // Convert into an expression
+ bl := ast.NewBool(b)
+ if expr != nil {
+ expr = &ast.BinaryExpr{Op: token.OR, X: expr, Y: bl}
+ } else {
+ expr = bl
+ }
+ } else {
+ x = ast.NewBool(b)
+ }
+ }
+
+ if k&cue.StringKind != 0 {
+ if expr != nil {
+ expr = &ast.BinaryExpr{Op: token.OR, X: expr, Y: ast.NewString(str)}
+ } else {
+ x = ast.NewString(str)
+ }
+ }
+
+ switch {
+ case expr != nil:
+ return expr, nil
+ case x != nil:
+ return x, nil
+ case errs == nil:
+ return nil, errors.Newf(pos,
+ "invalid type for environment variable %s", name)
+ }
+ return nil, errs
+}
+
+var boolValues = map[string]bool{
+ "1": true,
+ "0": false,
+ "t": true,
+ "f": false,
+ "T": true,
+ "F": false,
+ "true": true,
+ "false": false,
+ "TRUE": true,
+ "FALSE": false,
+ "True": true,
+ "False": false,
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/adt.go b/vendor/cuelang.org/go/internal/core/adt/adt.go
new file mode 100644
index 0000000000..1a286e7596
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/adt.go
@@ -0,0 +1,380 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+func Resolve(ctx *OpContext, c Conjunct) *Vertex {
+ env := c.Env
+ // TODO: also allow resolution in parent scopes. The following will set up
+ // the environments. But the compiler also needs to resolve accordingly.
+ //
+ // // Set up environments for parent scopes, if any.
+ // root := env
+ // for p := scope; p != nil; p = p.Parent {
+ // root.Up = &Environment{Vertex: p.Parent}
+ // root = root.Up
+ // }
+
+ var v Value
+
+ expr := c.Elem()
+ switch x := expr.(type) {
+ case Value:
+ v = x
+
+ case Resolver:
+ r, err := ctx.Resolve(env, x)
+ if err != nil {
+ v = err
+ break
+ }
+ // r.Finalize(ctx) // TODO: Finalize here?
+ return r
+
+ case Evaluator:
+ // TODO: have a way to evaluate, but not strip down to the value.
+ v, _ = ctx.Evaluate(env, expr.(Expr))
+
+ default:
+ // Unknown type.
+ v = ctx.NewErrf(
+ "could not evaluate expression %s of type %T", c.Elem(), c)
+ }
+
+ return ToVertex(v)
+}
+
+// A Node is any abstract data type representing an value or expression.
+type Node interface {
+ Source() ast.Node
+ node() // enforce internal.
+}
+
+// A Decl represents all valid StructLit elements.
+type Decl interface {
+ Node
+ declNode()
+}
+
+// An Elem represents all value ListLit elements.
+//
+// All Elem values can be used as a Decl.
+type Elem interface {
+ Decl
+ elemNode()
+}
+
+// An Expr corresponds to an ast.Expr.
+//
+// All Expr values can be used as an Elem or Decl.
+type Expr interface {
+ Elem
+ expr()
+}
+
+// A BaseValue is any Value or a *Marker. It indicates the type of a Vertex.
+type BaseValue interface {
+ Kind() Kind
+}
+
+// A Value represents a node in the evaluated data graph.
+//
+// All Values values can also be used as a Expr.
+type Value interface {
+ Expr
+ Concreteness() Concreteness
+ Kind() Kind
+}
+
+// An Evaluator provides a method to convert to a value.
+type Evaluator interface {
+ Node
+
+ // evaluate evaluates the underlying expression. If the expression
+ // is incomplete, it may record the error in ctx and return nil.
+ evaluate(ctx *OpContext) Value
+}
+
+// A Resolver represents a reference somewhere else within a tree that resolves
+// a value.
+type Resolver interface {
+ Node
+ resolve(ctx *OpContext, state VertexStatus) *Vertex
+}
+
+type YieldFunc func(env *Environment)
+
+// A Yielder represents 0 or more labeled values of structs or lists.
+type Yielder interface {
+ Node
+ yield(ctx *OpContext, fn YieldFunc)
+}
+
+// A Validator validates a Value. All Validators are Values.
+type Validator interface {
+ Value
+ validate(c *OpContext, v Value) *Bottom
+}
+
+// Pos returns the file position of n, or token.NoPos if it is unknown.
+func Pos(n Node) token.Pos {
+ src := n.Source()
+ if src == nil {
+ return token.NoPos
+ }
+ return src.Pos()
+}
+
+// Value
+
+func (x *Vertex) Concreteness() Concreteness {
+ // Depends on concreteness of value.
+ switch v := x.BaseValue.(type) {
+ case nil:
+ return Concrete // Should be indetermined.
+
+ case Value:
+ return v.Concreteness()
+
+ default: // *StructMarker, *ListMarker:
+ return Concrete
+ }
+}
+
+func (x *NodeLink) Concreteness() Concreteness { return Concrete }
+
+func (*Conjunction) Concreteness() Concreteness { return Constraint }
+func (*Disjunction) Concreteness() Concreteness { return Constraint }
+func (*BoundValue) Concreteness() Concreteness { return Constraint }
+
+func (*Builtin) Concreteness() Concreteness { return Concrete }
+func (*BuiltinValidator) Concreteness() Concreteness { return Constraint }
+
+// Value and Expr
+
+func (*Bottom) Concreteness() Concreteness { return BottomLevel }
+func (*Null) Concreteness() Concreteness { return Concrete }
+func (*Bool) Concreteness() Concreteness { return Concrete }
+func (*Num) Concreteness() Concreteness { return Concrete }
+func (*String) Concreteness() Concreteness { return Concrete }
+func (*Bytes) Concreteness() Concreteness { return Concrete }
+func (*Top) Concreteness() Concreteness { return Any }
+func (*BasicType) Concreteness() Concreteness { return Type }
+
+// Expr
+
+func (*StructLit) expr() {}
+func (*ListLit) expr() {}
+func (*DisjunctionExpr) expr() {}
+
+// TODO: also allow?
+// a: b: if cond {}
+//
+// It is unclear here, though, whether field `a` should be added
+// unconditionally.
+// func (*Comprehension) expr() {}
+
+// Expr and Value
+
+func (*Bottom) expr() {}
+func (*Null) expr() {}
+func (*Bool) expr() {}
+func (*Num) expr() {}
+func (*String) expr() {}
+func (*Bytes) expr() {}
+func (*Top) expr() {}
+func (*BasicType) expr() {}
+func (*Vertex) expr() {}
+func (*ListMarker) expr() {}
+func (*StructMarker) expr() {}
+func (*Conjunction) expr() {}
+func (*Disjunction) expr() {}
+func (*BoundValue) expr() {}
+func (*BuiltinValidator) expr() {}
+func (*Builtin) expr() {}
+
+// Expr and Resolver
+
+func (*NodeLink) expr() {}
+func (*FieldReference) expr() {}
+func (*ValueReference) expr() {}
+func (*LabelReference) expr() {}
+func (*DynamicReference) expr() {}
+func (*ImportReference) expr() {}
+func (*LetReference) expr() {}
+
+// Expr and Evaluator
+
+func (*BoundExpr) expr() {}
+func (*SelectorExpr) expr() {}
+func (*IndexExpr) expr() {}
+func (*SliceExpr) expr() {}
+func (*Interpolation) expr() {}
+func (*UnaryExpr) expr() {}
+func (*BinaryExpr) expr() {}
+func (*CallExpr) expr() {}
+
+// Decl and Expr (so allow attaching original source in Conjunct)
+
+func (*Field) declNode() {}
+func (x *Field) expr() Expr { return x.Value }
+func (*OptionalField) declNode() {}
+func (x *OptionalField) expr() Expr { return x.Value }
+func (*BulkOptionalField) declNode() {}
+func (x *BulkOptionalField) expr() Expr { return x.Value }
+func (*DynamicField) declNode() {}
+func (x *DynamicField) expr() Expr { return x.Value }
+
+// Decl, Elem, and Expr (so allow attaching original source in Conjunct)
+
+func (*Ellipsis) elemNode() {}
+func (*Ellipsis) declNode() {}
+func (x *Ellipsis) expr() Expr {
+ if x.Value == nil {
+ return top
+ }
+ return x.Value
+}
+
+var top = &Top{}
+
+// Decl and Yielder
+
+func (*LetClause) declNode() {}
+
+// Decl and Elem
+
+func (*StructLit) declNode() {}
+func (*StructLit) elemNode() {}
+func (*ListLit) declNode() {}
+func (*ListLit) elemNode() {}
+func (*Bottom) declNode() {}
+func (*Bottom) elemNode() {}
+func (*Null) declNode() {}
+func (*Null) elemNode() {}
+func (*Bool) declNode() {}
+func (*Bool) elemNode() {}
+func (*Num) declNode() {}
+func (*Num) elemNode() {}
+func (*String) declNode() {}
+func (*String) elemNode() {}
+func (*Bytes) declNode() {}
+func (*Bytes) elemNode() {}
+func (*Top) declNode() {}
+func (*Top) elemNode() {}
+func (*BasicType) declNode() {}
+func (*BasicType) elemNode() {}
+func (*BoundExpr) declNode() {}
+func (*BoundExpr) elemNode() {}
+func (*Vertex) declNode() {}
+func (*Vertex) elemNode() {}
+func (*ListMarker) declNode() {}
+func (*ListMarker) elemNode() {}
+func (*StructMarker) declNode() {}
+func (*StructMarker) elemNode() {}
+func (*Conjunction) declNode() {}
+func (*Conjunction) elemNode() {}
+func (*Disjunction) declNode() {}
+func (*Disjunction) elemNode() {}
+func (*BoundValue) declNode() {}
+func (*BoundValue) elemNode() {}
+func (*BuiltinValidator) declNode() {}
+func (*BuiltinValidator) elemNode() {}
+func (*NodeLink) declNode() {}
+func (*NodeLink) elemNode() {}
+func (*FieldReference) declNode() {}
+func (*FieldReference) elemNode() {}
+func (*ValueReference) declNode() {}
+func (*ValueReference) elemNode() {}
+func (*LabelReference) declNode() {}
+func (*LabelReference) elemNode() {}
+func (*DynamicReference) declNode() {}
+func (*DynamicReference) elemNode() {}
+func (*ImportReference) declNode() {}
+func (*ImportReference) elemNode() {}
+func (*LetReference) declNode() {}
+func (*LetReference) elemNode() {}
+func (*SelectorExpr) declNode() {}
+func (*SelectorExpr) elemNode() {}
+func (*IndexExpr) declNode() {}
+func (*IndexExpr) elemNode() {}
+func (*SliceExpr) declNode() {}
+func (*SliceExpr) elemNode() {}
+func (*Interpolation) declNode() {}
+func (*Interpolation) elemNode() {}
+func (*UnaryExpr) declNode() {}
+func (*UnaryExpr) elemNode() {}
+func (*BinaryExpr) declNode() {}
+func (*BinaryExpr) elemNode() {}
+func (*CallExpr) declNode() {}
+func (*CallExpr) elemNode() {}
+func (*Builtin) declNode() {}
+func (*Builtin) elemNode() {}
+func (*DisjunctionExpr) declNode() {}
+func (*DisjunctionExpr) elemNode() {}
+
+// Decl, Elem, and Yielder
+
+func (*Comprehension) declNode() {}
+func (*Comprehension) elemNode() {}
+
+// Node
+
+func (*Vertex) node() {}
+func (*Conjunction) node() {}
+func (*Disjunction) node() {}
+func (*BoundValue) node() {}
+func (*Builtin) node() {}
+func (*BuiltinValidator) node() {}
+func (*Bottom) node() {}
+func (*Null) node() {}
+func (*Bool) node() {}
+func (*Num) node() {}
+func (*String) node() {}
+func (*Bytes) node() {}
+func (*Top) node() {}
+func (*BasicType) node() {}
+func (*StructLit) node() {}
+func (*ListLit) node() {}
+func (*BoundExpr) node() {}
+func (*NodeLink) node() {}
+func (*FieldReference) node() {}
+func (*ValueReference) node() {}
+func (*LabelReference) node() {}
+func (*DynamicReference) node() {}
+func (*ImportReference) node() {}
+func (*LetReference) node() {}
+func (*SelectorExpr) node() {}
+func (*IndexExpr) node() {}
+func (*SliceExpr) node() {}
+func (*Interpolation) node() {}
+func (*UnaryExpr) node() {}
+func (*BinaryExpr) node() {}
+func (*CallExpr) node() {}
+func (*DisjunctionExpr) node() {}
+func (*Field) node() {}
+func (*OptionalField) node() {}
+func (*BulkOptionalField) node() {}
+func (*DynamicField) node() {}
+func (*Ellipsis) node() {}
+func (*Comprehension) node() {}
+func (*ForClause) node() {}
+func (*IfClause) node() {}
+func (*LetClause) node() {}
+func (*ValueClause) node() {}
diff --git a/vendor/cuelang.org/go/internal/core/adt/binop.go b/vendor/cuelang.org/go/internal/core/adt/binop.go
new file mode 100644
index 0000000000..e2410a1e8f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/binop.go
@@ -0,0 +1,321 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "bytes"
+ "strings"
+)
+
+// BinOp handles all operations except AndOp and OrOp. This includes processing
+// unary comparators such as '<4' and '=~"foo"'.
+//
+// BinOp returns nil if not both left and right are concrete.
+func BinOp(c *OpContext, op Op, left, right Value) Value {
+ leftKind := left.Kind()
+ rightKind := right.Kind()
+
+ const msg = "non-concrete value '%v' to operation '%s'"
+ if left.Concreteness() > Concrete {
+ return &Bottom{
+ Code: IncompleteError,
+ Err: c.Newf(msg, left, op),
+ }
+ }
+ if right.Concreteness() > Concrete {
+ return &Bottom{
+ Code: IncompleteError,
+ Err: c.Newf(msg, right, op),
+ }
+ }
+
+ if err := CombineErrors(c.src, left, right); err != nil {
+ return err
+ }
+
+ switch op {
+ case EqualOp:
+ switch {
+ case leftKind == NullKind && rightKind == NullKind:
+ return c.newBool(true)
+
+ case leftKind == NullKind || rightKind == NullKind:
+ return c.newBool(false)
+
+ case leftKind == BoolKind:
+ return c.newBool(c.BoolValue(left) == c.BoolValue(right))
+
+ case leftKind == StringKind:
+ // normalize?
+ return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right)))
+
+ case leftKind == BytesKind:
+ return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+ case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+ // n := c.newNum()
+ return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X))
+
+ case leftKind == ListKind && rightKind == ListKind:
+ x := c.Elems(left)
+ y := c.Elems(right)
+ if len(x) != len(y) {
+ return c.newBool(false)
+ }
+ for i, e := range x {
+ a, _ := c.Concrete(nil, e, op)
+ b, _ := c.Concrete(nil, y[i], op)
+ if !test(c, EqualOp, a, b) {
+ return c.newBool(false)
+ }
+ }
+ return c.newBool(true)
+ }
+
+ case NotEqualOp:
+ switch {
+ case leftKind == NullKind && rightKind == NullKind:
+ return c.newBool(false)
+
+ case leftKind == NullKind || rightKind == NullKind:
+ return c.newBool(true)
+
+ case leftKind == BoolKind:
+ return c.newBool(c.boolValue(left, op) != c.boolValue(right, op))
+
+ case leftKind == StringKind:
+ // normalize?
+ return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right)))
+
+ case leftKind == BytesKind:
+ return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+ case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+ // n := c.newNum()
+ return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X))
+
+ case leftKind == ListKind && rightKind == ListKind:
+ x := c.Elems(left)
+ y := c.Elems(right)
+ if len(x) != len(y) {
+ return c.newBool(false)
+ }
+ for i, e := range x {
+ a, _ := c.Concrete(nil, e, op)
+ b, _ := c.Concrete(nil, y[i], op)
+ if !test(c, EqualOp, a, b) {
+ return c.newBool(true)
+ }
+ }
+ return c.newBool(false)
+ }
+
+ case LessThanOp, LessEqualOp, GreaterEqualOp, GreaterThanOp:
+ switch {
+ case leftKind == StringKind && rightKind == StringKind:
+ // normalize?
+ return cmpTonode(c, op, strings.Compare(c.stringValue(left, op), c.stringValue(right, op)))
+
+ case leftKind == BytesKind && rightKind == BytesKind:
+ return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+ case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+ // n := c.newNum(left, right)
+ return cmpTonode(c, op, c.Num(left, op).X.Cmp(&c.Num(right, op).X))
+ }
+
+ case BoolAndOp:
+ return c.newBool(c.boolValue(left, op) && c.boolValue(right, op))
+
+ case BoolOrOp:
+ return c.newBool(c.boolValue(left, op) || c.boolValue(right, op))
+
+ case MatchOp:
+ // if y.re == nil {
+ // // This really should not happen, but leave in for safety.
+ // b, err := Regexp.MatchString(str, x.str)
+ // if err != nil {
+ // return c.Errf(Src, "error parsing Regexp: %v", err)
+ // }
+ // return boolTonode(Src, b)
+ // }
+ return c.newBool(c.regexp(right).MatchString(c.stringValue(left, op)))
+
+ case NotMatchOp:
+ return c.newBool(!c.regexp(right).MatchString(c.stringValue(left, op)))
+
+ case AddOp:
+ switch {
+ case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+ return c.Add(c.Num(left, op), c.Num(right, op))
+
+ case leftKind == StringKind && rightKind == StringKind:
+ return c.NewString(c.StringValue(left) + c.StringValue(right))
+
+ case leftKind == BytesKind && rightKind == BytesKind:
+ ba := c.bytesValue(left, op)
+ bb := c.bytesValue(right, op)
+ b := make([]byte, len(ba)+len(bb))
+ copy(b, ba)
+ copy(b[len(ba):], bb)
+ return c.newBytes(b)
+
+ case leftKind == ListKind && rightKind == ListKind:
+ // TODO: get rid of list addition. Semantically it is somewhat
+ // unclear and, as it turns out, it is also hard to get right.
+ // Simulate addition with comprehensions now.
+ if err := c.Err(); err != nil {
+ return err
+ }
+
+ x := MakeIdentLabel(c, "x", "")
+
+ forClause := func(src Expr) *Comprehension {
+ s := &StructLit{Decls: []Decl{
+ &FieldReference{UpCount: 1, Label: x},
+ }}
+ return &Comprehension{
+ Clauses: &ForClause{
+ Value: x,
+ Src: src,
+ Dst: &ValueClause{s},
+ },
+ Value: s,
+ }
+ }
+
+ list := &ListLit{
+ Elems: []Elem{
+ forClause(left),
+ forClause(right),
+ },
+ }
+
+ n := &Vertex{}
+ n.AddConjunct(MakeRootConjunct(c.Env(0), list))
+ n.Finalize(c)
+
+ return n
+ }
+
+ case SubtractOp:
+ return c.Sub(c.Num(left, op), c.Num(right, op))
+
+ case MultiplyOp:
+ switch {
+ // float
+ case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+ return c.Mul(c.Num(left, op), c.Num(right, op))
+
+ case leftKind == StringKind && rightKind == IntKind:
+ const as = "string multiplication"
+ return c.NewString(strings.Repeat(c.stringValue(left, as), int(c.uint64(right, as))))
+
+ case leftKind == IntKind && rightKind == StringKind:
+ const as = "string multiplication"
+ return c.NewString(strings.Repeat(c.stringValue(right, as), int(c.uint64(left, as))))
+
+ case leftKind == BytesKind && rightKind == IntKind:
+ const as = "bytes multiplication"
+ return c.newBytes(bytes.Repeat(c.bytesValue(left, as), int(c.uint64(right, as))))
+
+ case leftKind == IntKind && rightKind == BytesKind:
+ const as = "bytes multiplication"
+ return c.newBytes(bytes.Repeat(c.bytesValue(right, as), int(c.uint64(left, as))))
+
+ case leftKind == ListKind && rightKind == IntKind:
+ left, right = right, left
+ fallthrough
+
+ case leftKind == IntKind && rightKind == ListKind:
+ // TODO: get rid of list multiplication.
+
+ list := &ListLit{}
+ x := MakeIdentLabel(c, "x", "")
+
+ for i := c.uint64(left, "list multiplier"); i > 0; i-- {
+ st := &StructLit{Decls: []Decl{
+ &FieldReference{UpCount: 1, Label: x},
+ }}
+ list.Elems = append(list.Elems,
+ &Comprehension{
+ Clauses: &ForClause{
+ Value: x,
+ Src: right,
+ Dst: &ValueClause{st},
+ },
+ Value: st,
+ },
+ )
+ }
+ if err := c.Err(); err != nil {
+ return err
+ }
+
+ n := &Vertex{}
+ n.AddConjunct(MakeRootConjunct(c.Env(0), list))
+ n.Finalize(c)
+
+ return n
+ }
+
+ case FloatQuotientOp:
+ if leftKind&NumKind != 0 && rightKind&NumKind != 0 {
+ return c.Quo(c.Num(left, op), c.Num(right, op))
+ }
+
+ case IntDivideOp:
+ if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+ return c.IntDiv(c.Num(left, op), c.Num(right, op))
+ }
+
+ case IntModuloOp:
+ if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+ return c.IntMod(c.Num(left, op), c.Num(right, op))
+ }
+
+ case IntQuotientOp:
+ if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+ return c.IntQuo(c.Num(left, op), c.Num(right, op))
+ }
+
+ case IntRemainderOp:
+ if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+ return c.IntRem(c.Num(left, op), c.Num(right, op))
+ }
+ }
+
+ return c.NewErrf("invalid operands %s and %s to '%s' (type %s and %s)",
+ left, right, op, left.Kind(), right.Kind())
+}
+
+func cmpTonode(c *OpContext, op Op, r int) Value {
+ result := false
+ switch op {
+ case LessThanOp:
+ result = r == -1
+ case LessEqualOp:
+ result = r != 1
+ case EqualOp, AndOp:
+ result = r == 0
+ case NotEqualOp:
+ result = r != 0
+ case GreaterEqualOp:
+ result = r != -1
+ case GreaterThanOp:
+ result = r == 1
+ }
+ return c.newBool(result)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/closed.go b/vendor/cuelang.org/go/internal/core/adt/closed.go
new file mode 100644
index 0000000000..34a3e45e39
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/closed.go
@@ -0,0 +1,518 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// This file implements the closedness algorithm.
+
+// Outline of algorithm
+//
+// To compute closedness each Vertex is associated with a tree which has
+// leaf nodes with sets of allowed labels, and interior nodes that describe
+// how these sets may be combined: Or, for embedding, or And for definitions.
+//
+// Each conjunct of a Vertex is associated with such a leaf node. Each
+// conjunct that evaluates to a struct is added to the list of Structs, which
+// in the end forms this tree. If a conjunct is embedded, or references another
+// struct or definition, it adds an interior node to reflect this.
+//
+// To test whether a feature is allowed, it must satisfy the resulting
+// expression tree.
+//
+// In order to avoid having to copy the tree for each node, the tree is linked
+// from leaf node to root, rather than the other way around. This allows
+// parent nodes to be shared as the tree grows and ensures that the growth
+// of the tree is bounded by the number of conjuncts. As a consequence, this
+// requires a two-pass algorithm:
+//
+// - walk up to mark which nodes are required and count the number of
+// child nodes that need to be satisfied.
+// - verify fields in leaf structs and mark parent leafs as satisfied
+// when appropriate.
+//
+// A label is allowed if all required root nodes are marked as accepted after
+// these two passes.
+//
+
+// A note on embeddings: it is important to keep track which conjuncts originate
+// from an embedding, as an embedded value may eventually turn into a closed
+// struct. Consider
+//
+// a: {
+// b
+// d: e: int
+// }
+// b: d: {
+// #A & #B
+// }
+//
+// At the point of evaluating `a`, the struct is not yet closed. However,
+// descending into `d` will trigger the inclusion of definitions which in turn
+// causes the struct to be closed. At this point, it is important to know that
+// `b` originated from an embedding, as otherwise `e` may not be allowed.
+
+// TODO(perf):
+// - less nodes
+// - disable StructInfo nodes that can no longer pass a feature
+// - sort StructInfos active ones first.
+
+// TODO(errors): return a dedicated ConflictError that can track original
+// positions on demand.
+
+func (v *Vertex) IsInOneOf(t SpanType) bool {
+ for _, s := range v.Structs {
+ if s.CloseInfo.IsInOneOf(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// IsRecursivelyClosed returns true if this value is either a definition or unified
+// with a definition.
+func (v *Vertex) IsRecursivelyClosed() bool {
+ return v.Closed || v.IsInOneOf(DefinitionSpan)
+}
+
+type closeNodeType uint8
+
+const (
+ // a closeRef node is created when there is a non-definition reference.
+ // These nodes are not necessary for computing results, but may be
+ // relevant down the line to group closures through embedded values and
+ // to track position information for failures.
+ closeRef closeNodeType = iota
+
+ // closeDef indicates this node was introduced as a result of referencing
+ // a definition.
+ closeDef
+
+ // closeEmbed indicates this node was added as a result of an embedding.
+ closeEmbed
+
+ _ = closeRef // silence the linter
+)
+
+// TODO: merge with closeInfo: this is a leftover of the refactoring.
+type CloseInfo struct {
+ *closeInfo
+
+ IsClosed bool
+ FieldTypes OptionalType
+}
+
+func (c CloseInfo) Location() Node {
+ if c.closeInfo == nil {
+ return nil
+ }
+ return c.closeInfo.location
+}
+
+func (c CloseInfo) SpanMask() SpanType {
+ if c.closeInfo == nil {
+ return 0
+ }
+ return c.span
+}
+
+func (c CloseInfo) RootSpanType() SpanType {
+ if c.closeInfo == nil {
+ return 0
+ }
+ return c.root
+}
+
+func (c CloseInfo) IsInOneOf(t SpanType) bool {
+ if c.closeInfo == nil {
+ return false
+ }
+ return c.span&t != 0
+}
+
+// TODO(perf): remove: error positions should always be computed on demand
+// in dedicated error types.
+func (c *CloseInfo) AddPositions(ctx *OpContext) {
+ for s := c.closeInfo; s != nil; s = s.parent {
+ if loc := s.location; loc != nil {
+ ctx.AddPosition(loc)
+ }
+ }
+}
+
+// TODO(perf): use on StructInfo. Then if parent and expression are the same
+// it is possible to use cached value.
+func (c CloseInfo) SpawnEmbed(x Expr) CloseInfo {
+ var span SpanType
+ if c.closeInfo != nil {
+ span = c.span
+ }
+
+ c.closeInfo = &closeInfo{
+ parent: c.closeInfo,
+ location: x,
+ mode: closeEmbed,
+ root: EmbeddingSpan,
+ span: span | EmbeddingSpan,
+ }
+ return c
+}
+
+// SpawnGroup is used for structs that contain embeddings that may end up
+// closing the struct. This is to force that `b` is not allowed in
+//
+// a: {#foo} & {b: int}
+//
+func (c CloseInfo) SpawnGroup(x Expr) CloseInfo {
+ var span SpanType
+ if c.closeInfo != nil {
+ span = c.span
+ }
+ c.closeInfo = &closeInfo{
+ parent: c.closeInfo,
+ location: x,
+ span: span,
+ }
+ return c
+}
+
+// SpawnSpan is used to track that a value is introduced by a comprehension
+// or constraint. Definition and embedding spans are introduced with SpawnRef
+// and SpawnEmbed, respectively.
+func (c CloseInfo) SpawnSpan(x Node, t SpanType) CloseInfo {
+ var span SpanType
+ if c.closeInfo != nil {
+ span = c.span
+ }
+ c.closeInfo = &closeInfo{
+ parent: c.closeInfo,
+ location: x,
+ root: t,
+ span: span | t,
+ }
+ return c
+}
+
+func (c CloseInfo) SpawnRef(arc *Vertex, isDef bool, x Expr) CloseInfo {
+ var span SpanType
+ if c.closeInfo != nil {
+ span = c.span
+ }
+ c.closeInfo = &closeInfo{
+ parent: c.closeInfo,
+ location: x,
+ span: span,
+ }
+ if isDef {
+ c.mode = closeDef
+ c.closeInfo.root = DefinitionSpan
+ c.closeInfo.span |= DefinitionSpan
+ }
+ return c
+}
+
+// IsDef reports whether an expression is a reference that references a
+// definition anywhere in its selection path.
+//
+// TODO(performance): this should be merged with resolve(). But for now keeping
+// this code isolated makes it easier to see what it is for.
+func IsDef(x Expr) bool {
+ switch r := x.(type) {
+ case *FieldReference:
+ return r.Label.IsDef()
+
+ case *SelectorExpr:
+ if r.Sel.IsDef() {
+ return true
+ }
+ return IsDef(r.X)
+
+ case *IndexExpr:
+ return IsDef(r.X)
+ }
+ return false
+}
+
+// A SpanType is used to indicate whether a CUE value is within the scope of
+// a certain CUE language construct, the span type.
+type SpanType uint8
+
+const (
+ // EmbeddingSpan means that this value was embedded at some point and should
+ // not be included as a possible root node in the todo field of OpContext.
+ EmbeddingSpan SpanType = 1 << iota
+ ConstraintSpan
+ ComprehensionSpan
+ DefinitionSpan
+)
+
+type closeInfo struct {
+ // location records the expression that led to this node's introduction.
+ location Node
+
+ // The parent node in the tree.
+ parent *closeInfo
+
+ // TODO(performance): if references are chained, we could have a separate
+ // parent pointer to skip the chain.
+
+ // mode indicates whether this node was added as part of an embedding,
+ // definition or non-definition reference.
+ mode closeNodeType
+
+ // noCheck means this struct is irrelevant for closedness checking. This can
+ // happen when:
+ // - it is a sibling of a new definition.
+ noCheck bool // don't process for inclusion info
+
+ root SpanType
+ span SpanType
+}
+
+// closeStats holds the administrative fields for a closeInfo value. Each
+// closeInfo is associated with a single closeStats value per unification
+// operator. This association is done through an OpContext. This allows the
+// same value to be used in multiple concurrent unification operations.
+// NOTE: there are other parts of the algorithm that are not thread-safe yet.
+type closeStats struct {
+ // the other fields of this closeStats value are only valid if generation
+ // is equal to the generation in OpContext. This allows for lazy
+ // initialization of closeStats.
+ generation int
+
+ // These counts keep track of how many required child nodes need to be
+ // completed before this node is accepted.
+ requiredCount int
+ acceptedCount int
+
+ // accepted is set if this node is accepted.
+ accepted bool
+
+ required bool
+ next *closeStats
+}
+
+func (c *closeInfo) isClosed() bool {
+ return c.mode == closeDef
+}
+
+func isClosed(v *Vertex) bool {
+ for _, s := range v.Structs {
+ if s.IsClosed {
+ return true
+ }
+ for c := s.closeInfo; c != nil; c = c.parent {
+ if c.isClosed() {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Accept determines whether f is allowed in n. It uses the OpContext for
+// caching administrative fields.
+func Accept(ctx *OpContext, n *Vertex, f Feature) (found, required bool) {
+ ctx.generation++
+ ctx.todo = nil
+
+ var optionalTypes OptionalType
+
+ // TODO(perf): more aggressively determine whether a struct is open or
+ // closed: open structs do not have to be checked, yet they can particularly
+// be the ones with performance issues, for instance as a result of
+ // embedded for comprehensions.
+ for _, s := range n.Structs {
+ if !s.useForAccept() {
+ continue
+ }
+ markCounts(ctx, s.CloseInfo)
+ optionalTypes |= s.types
+ }
+
+ var str Value
+ if f.Index() == MaxIndex {
+ f &= fTypeMask
+ } else if optionalTypes&(HasComplexPattern|HasDynamic) != 0 && f.IsString() {
+ str = f.ToValue(ctx)
+ }
+
+ for _, s := range n.Structs {
+ if !s.useForAccept() {
+ continue
+ }
+ if verifyArc(ctx, s, f, str) {
+ // Beware: don't add to below expression: this relies on the
+ // side effects of markUp.
+ ok := markUp(ctx, s.closeInfo, 0)
+ found = found || ok
+ }
+ }
+
+ // Reject if any of the roots is not accepted.
+ for x := ctx.todo; x != nil; x = x.next {
+ if !x.accepted {
+ return false, true
+ }
+ }
+
+ return found, ctx.todo != nil
+}
+
+func markCounts(ctx *OpContext, info CloseInfo) {
+ if info.IsClosed {
+ markRequired(ctx, info.closeInfo)
+ return
+ }
+ for s := info.closeInfo; s != nil; s = s.parent {
+ if s.isClosed() {
+ markRequired(ctx, s)
+ return
+ }
+ }
+}
+
+func markRequired(ctx *OpContext, info *closeInfo) {
+ count := 0
+ for ; ; info = info.parent {
+ var s closeInfo
+ if info != nil {
+ s = *info
+ }
+
+ x := getScratch(ctx, info)
+
+ x.requiredCount += count
+
+ if x.required {
+ return
+ }
+
+ if s.span&EmbeddingSpan == 0 {
+ x.next = ctx.todo
+ ctx.todo = x
+ }
+
+ x.required = true
+
+ if info == nil {
+ return
+ }
+
+ count = 0
+ if s.mode != closeEmbed {
+ count = 1
+ }
+ }
+}
+
+func markUp(ctx *OpContext, info *closeInfo, count int) bool {
+ for ; ; info = info.parent {
+ var s closeInfo
+ if info != nil {
+ s = *info
+ }
+
+ x := getScratch(ctx, info)
+
+ x.acceptedCount += count
+
+ if x.acceptedCount < x.requiredCount {
+ return false
+ }
+
+ x.accepted = true
+
+ if info == nil {
+ return true
+ }
+
+ count = 0
+ if x.required && s.mode != closeEmbed {
+ count = 1
+ }
+ }
+}
+
+// getScratch: explain generation.
+func getScratch(ctx *OpContext, s *closeInfo) *closeStats {
+ m := ctx.closed
+ if m == nil {
+ m = map[*closeInfo]*closeStats{}
+ ctx.closed = m
+ }
+
+ x := m[s]
+ if x == nil {
+ x = &closeStats{}
+ m[s] = x
+ }
+
+ if x.generation != ctx.generation {
+ *x = closeStats{generation: ctx.generation}
+ }
+
+ return x
+}
+
+func verifyArc(ctx *OpContext, s *StructInfo, f Feature, label Value) bool {
+ isRegular := f.IsString()
+
+ o := s.StructLit
+ env := s.Env
+
+ if isRegular && (len(o.Additional) > 0 || o.IsOpen) {
+ return true
+ }
+
+ for _, g := range o.Fields {
+ if f == g.Label {
+ return true
+ }
+ }
+
+ if !isRegular {
+ return false
+ }
+
+ // Do not record errors during this validation.
+ errs := ctx.errs
+ defer func() { ctx.errs = errs }()
+
+ if len(o.Dynamic) > 0 && f.IsString() && label != nil {
+ for _, b := range o.Dynamic {
+ v := env.evalCached(ctx, b.Key)
+ s, ok := Unwrap(v).(*String)
+ if !ok {
+ continue
+ }
+ if label.(*String).Str == s.Str {
+ return true
+ }
+ }
+ }
+
+ for _, b := range o.Bulk {
+ if matchBulk(ctx, env, b, f, label) {
+ return true
+ }
+ }
+
+ // TODO(perf): delay adding this position: create a special error type that
+ // computes all necessary positions on demand.
+ if ctx != nil {
+ ctx.AddPosition(s.StructLit)
+ }
+
+ return false
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/closed2.go b/vendor/cuelang.org/go/internal/core/adt/closed2.go
new file mode 100644
index 0000000000..ada0342d2d
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/closed2.go
@@ -0,0 +1,68 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// CloseDef defines how individual fieldSets (corresponding to conjuncts)
+// combine to determine whether a field is contained in a closed set.
+//
+// A CloseDef combines multiple conjuncts and embeddings. All CloseDefs are
+// stored in a slice. References to other CloseDefs are indices within this slice.
+// Together they define the top of the expression tree of how
+// conjuncts combine together (a canopy).
+
+// isComplexStruct reports whether the Closed information should be copied as a
+// subtree into the parent node using InsertSubtree. If not, the conjuncts can
+// just be inserted at the current ID.
+func isComplexStruct(ctx *OpContext, v *Vertex) bool {
+ return v.IsClosedStruct()
+}
+
+// TODO: cleanup code and error messages. Reduce duplication in some related
+// code.
+func verifyArc2(ctx *OpContext, f Feature, v *Vertex, isClosed bool) (found bool, err *Bottom) {
+ // Don't check computed, temporary vertices.
+ if v.Label == InvalidLabel {
+ return true, nil
+ }
+
+ // TODO(perf): collect positions in error.
+ defer ctx.ReleasePositions(ctx.MarkPositions())
+
+ // Note: it is okay to use parent here as this only needs to be computed
+ // for the original location.
+ if ok, required := Accept(ctx, v.Parent, f); ok || (!required && !isClosed) {
+ return true, nil
+ }
+
+ if !f.IsString() {
+ // if f.IsHidden() { Also change Accept in composite.go
+ return false, nil
+ }
+
+ if v != nil {
+ for _, c := range v.Conjuncts {
+ if pos := c.Field(); pos != nil {
+ ctx.AddPosition(pos)
+ }
+ }
+ }
+
+ for _, s := range v.Parent.Structs {
+ s.AddPositions(ctx)
+ }
+
+ label := f.SelectorString(ctx)
+ return false, ctx.NewErrf("field not allowed: %s", label)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/composite.go b/vendor/cuelang.org/go/internal/core/adt/composite.go
new file mode 100644
index 0000000000..603abf2386
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/composite.go
@@ -0,0 +1,838 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// TODO: unanswered questions about structural cycles:
+//
+// 1. When detecting a structural cycle, should we consider this as:
+// a) an unevaluated value,
+// b) an incomplete error (which does not affect parent validity), or
+// c) a special value.
+//
+// Making it an error is the simplest way to ensure reentrancy is disallowed:
+// without an error it would require an additional mechanism to stop reentrancy
+// from continuing to process. Even worse, in some cases it may only partially
+// evaluate, resulting in unexpected results. For this reason, we are taking
+// approach `b` for now.
+//
+// This has some consequences of how disjunctions are treated though. Consider
+//
+// list: {
+// head: _
+// tail: list | null
+// }
+//
+// When making it an error, evaluating the above will result in
+//
+// list: {
+// head: _
+// tail: null
+// }
+//
+// because list will result in a structural cycle, and thus an error, it will be
+// stripped from the disjunction. This may or may not be a desirable property. A
+// nice thing is that it is not required to write `list | *null`. A disadvantage
+// is that this is perhaps somewhat inexplicit.
+//
+// When not making it an error (and simply cease evaluating child arcs upon
+// cycle detection), the result would be:
+//
+// list: {
+// head: _
+// tail: list | null
+// }
+//
+// In other words, an evaluation would result in a cycle and thus an error.
+// Implementations can recognize such cases by having unevaluated arcs. An
+// explicit structure cycle marker would probably be less error prone.
+//
+// Note that in both cases, a reference to list will still use the original
+// conjuncts, so the result will be the same for either method in this case.
+//
+//
+// 2. Structural cycle allowance.
+//
+// Structural cycle detection disallows reentrancy as well. This means one
+// cannot use structs for recursive computation. This will probably preclude
+// evaluation of some configuration. Given that there is no real alternative
+// yet, we could allow structural cycle detection to be optionally disabled.
+
+// An Environment links the parent scopes for identifier lookup to a composite
+// node. Each conjunct that makes up a node in the tree can be associated with
+// a different environment (although some conjuncts may share an Environment).
+type Environment struct {
+ Up *Environment
+ Vertex *Vertex
+
+ // DynamicLabel is only set when instantiating a field from a pattern
+ // constraint. It is used to resolve label references.
+ DynamicLabel Feature
+
+ // TODO(perf): make the following public fields a shareable struct as it
+ // mostly is going to be the same for child nodes.
+
+ // Cyclic indicates a structural cycle was detected for this conjunct or one
+ // of its ancestors.
+ Cyclic bool
+
+ // Deref keeps track of nodes that should dereference to Vertex. It is used
+ // for detecting structural cycle.
+ //
+ // The detection algorithm is based on Tomabechi's quasi-destructive graph
+ // unification. This detection requires dependencies to be resolved into
+ // fully dereferenced vertices. This is not the case in our algorithm:
+ // the result of evaluating conjuncts is placed into dereferenced vertices
+ // _after_ they are evaluated, but the Environment still points to the
+ // non-dereferenced context.
+ //
+ // In order to be able to detect structural cycles, we need to ensure that
+ // at least one node that is part of a cycle in the context in which
+ // conjunctions are evaluated dereferences correctly.
+ //
+ // The only field necessary to detect a structural cycle, however, is
+ // the Status field of the Vertex. So rather than dereferencing a node
+ // proper, it is sufficient to copy the Status of the dereferenced nodes
+ // to these nodes (will always be EvaluatingArcs).
+ Deref []*Vertex
+
+ // Cycles contains vertices for which cycles are detected. It is used
+ // for tracking self-references within structural cycles.
+ //
+ // Unlike Deref, Cycles is not incremented with child nodes.
+ // TODO: Cycles is always a tail end of Deref, so this can be optimized.
+ Cycles []*Vertex
+
+ cache map[Expr]Value
+}
+
+type ID int32
+
+// evalCached is used to look up let expressions. Caching let expressions
+// prevents a possible combinatorial explosion.
+func (e *Environment) evalCached(c *OpContext, x Expr) Value {
+ if v, ok := x.(Value); ok {
+ return v
+ }
+ v, ok := e.cache[x]
+ if !ok {
+ if e.cache == nil {
+ e.cache = map[Expr]Value{}
+ }
+ env, src := c.e, c.src
+ c.e, c.src = e, x.Source()
+ v = c.evalState(x, Partial) // TODO: should this be Finalized?
+ c.e, c.src = env, src
+ if b, ok := v.(*Bottom); !ok || !b.IsIncomplete() {
+ e.cache[x] = v
+ }
+ }
+ return v
+}
+
+// A Vertex is a node in the value tree. It may be a leaf or internal node.
+// It may have arcs to represent elements of a fully evaluated struct or list.
+//
+// For structs, it only contains definitions and concrete fields.
+// optional fields are dropped.
+//
+// It maintains source information such as a list of conjuncts that contributed
+// to the value.
+type Vertex struct {
+ // Parent links to a parent Vertex. This parent should only be used to
+ // access the parent's Label field to find the relative location within a
+ // tree.
+ Parent *Vertex
+
+ // Label is the feature leading to this vertex.
+ Label Feature
+
+ // State:
+ // eval: nil, BaseValue: nil -- unevaluated
+ // eval: *, BaseValue: nil -- evaluating
+ // eval: *, BaseValue: * -- finalized
+ //
+ state *nodeContext
+ // TODO: move the following status fields to nodeContext.
+
+ // status indicates the evaluation progress of this vertex.
+ status VertexStatus
+
+	// isData indicates that this Vertex is to be interpreted as data: pattern
+ // and additional constraints, as well as optional fields, should be
+ // ignored.
+ isData bool
+ Closed bool
+ nonMonotonicReject bool
+ nonMonotonicInsertGen int32
+ nonMonotonicLookupGen int32
+
+ // EvalCount keeps track of temporary dereferencing during evaluation.
+ // If EvalCount > 0, status should be considered to be EvaluatingArcs.
+ EvalCount int32
+
+ // SelfCount is used for tracking self-references.
+ SelfCount int32
+
+ // BaseValue is the value associated with this vertex. For lists and structs
+ // this is a sentinel value indicating its kind.
+ BaseValue BaseValue
+
+ // ChildErrors is the collection of all errors of children.
+ ChildErrors *Bottom
+
+ // The parent of nodes can be followed to determine the path within the
+ // configuration of this node.
+ // Value Value
+ Arcs []*Vertex // arcs are sorted in display order.
+
+ // Conjuncts lists the structs that ultimately formed this Composite value.
+ // This includes all selected disjuncts.
+ //
+ // This value may be nil, in which case the Arcs are considered to define
+ // the final value of this Vertex.
+ Conjuncts []Conjunct
+
+ // Structs is a slice of struct literals that contributed to this value.
+ // This information is used to compute the topological sort of arcs.
+ Structs []*StructInfo
+}
+
+func (v *Vertex) Clone() *Vertex {
+ c := *v
+ c.state = nil
+ return &c
+}
+
+type StructInfo struct {
+ *StructLit
+
+ Env *Environment
+
+ CloseInfo
+
+ // Embed indicates the struct in which this struct is embedded (originally),
+ // or nil if this is a root structure.
+ // Embed *StructInfo
+ // Context *RefInfo // the location from which this struct originates.
+ Disable bool
+
+ Embedding bool
+}
+
+// TODO(perf): this could be much more aggressive for eliminating structs that
+// are immaterial for closing.
+func (s *StructInfo) useForAccept() bool {
+ if c := s.closeInfo; c != nil {
+ return !c.noCheck
+ }
+ return true
+}
+
+// VertexStatus indicates the evaluation progress of a Vertex.
+type VertexStatus int8
+
+const (
+ // Unprocessed indicates a Vertex has not been processed before.
+ // Value must be nil.
+ Unprocessed VertexStatus = iota
+
+ // Evaluating means that the current Vertex is being evaluated. If this is
+ // encountered it indicates a reference cycle. Value must be nil.
+ Evaluating
+
+ // Partial indicates that the result was only partially evaluated. It will
+	// need to be fully evaluated to get a complete result.
+ //
+ // TODO: this currently requires a renewed computation. Cache the
+ // nodeContext to allow reusing the computations done so far.
+ Partial
+
+ // AllArcs is request only. It must be past Partial, but
+ // before recursively resolving arcs.
+ AllArcs
+
+ // EvaluatingArcs indicates that the arcs of the Vertex are currently being
+ // evaluated. If this is encountered it indicates a structural cycle.
+ // Value does not have to be nil
+ EvaluatingArcs
+
+ // Finalized means that this node is fully evaluated and that the results
+	// are safe to use without further consideration.
+ Finalized
+)
+
+func (s VertexStatus) String() string {
+ switch s {
+ case Unprocessed:
+ return "unprocessed"
+ case Evaluating:
+ return "evaluating"
+ case Partial:
+ return "partial"
+ case AllArcs:
+ return "allarcs"
+ case EvaluatingArcs:
+ return "evaluatingArcs"
+ case Finalized:
+ return "finalized"
+ default:
+ return "unknown"
+ }
+}
+
+func (v *Vertex) Status() VertexStatus {
+ if v.EvalCount > 0 {
+ return EvaluatingArcs
+ }
+ return v.status
+}
+
+func (v *Vertex) UpdateStatus(s VertexStatus) {
+ Assertf(v.status <= s+1, "attempt to regress status from %d to %d", v.Status(), s)
+
+ if s == Finalized && v.BaseValue == nil {
+ // panic("not finalized")
+ }
+ v.status = s
+}
+
+// Value returns the Value of v without definitions if it is a scalar
+// or itself otherwise.
+func (v *Vertex) Value() Value {
+ switch x := v.BaseValue.(type) {
+ case nil:
+ return nil
+ case *StructMarker, *ListMarker:
+ return v
+ case Value:
+ // TODO: recursively descend into Vertex?
+ return x
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v.BaseValue))
+ }
+}
+
+// isUndefined reports whether a vertex does not have a usable BaseValue yet.
+func (v *Vertex) isUndefined() bool {
+ switch v.BaseValue {
+ case nil, cycle:
+ return true
+ }
+ return false
+}
+
+func (x *Vertex) IsConcrete() bool {
+ return x.Concreteness() <= Concrete
+}
+
+// IsData reports whether v should be interpreted in data mode. In other words,
+// it tells whether optional field matching and non-regular fields, like
+// definitions and hidden fields, should be ignored.
+func (v *Vertex) IsData() bool {
+ return v.isData || len(v.Conjuncts) == 0
+}
+
+// ToDataSingle creates a new Vertex that represents just the regular fields
+// of this vertex. Arcs are left untouched.
+// It is used by cue.Eval to convert nodes to data on per-node basis.
+func (v *Vertex) ToDataSingle() *Vertex {
+ w := *v
+ w.isData = true
+ w.state = nil
+ w.status = Finalized
+ return &w
+}
+
+// ToDataAll returns a new v where v and all its descendants contain only
+// the regular fields.
+func (v *Vertex) ToDataAll() *Vertex {
+ arcs := make([]*Vertex, 0, len(v.Arcs))
+ for _, a := range v.Arcs {
+ if a.Label.IsRegular() {
+ arcs = append(arcs, a.ToDataAll())
+ }
+ }
+ w := *v
+ w.state = nil
+ w.status = Finalized
+
+ w.BaseValue = toDataAll(w.BaseValue)
+ w.Arcs = arcs
+ w.isData = true
+ w.Conjuncts = make([]Conjunct, len(v.Conjuncts))
+ // TODO(perf): this is not strictly necessary for evaluation, but it can
+ // hurt performance greatly. Drawback is that it may disable ordering.
+ for _, s := range w.Structs {
+ s.Disable = true
+ }
+ copy(w.Conjuncts, v.Conjuncts)
+ for i, c := range w.Conjuncts {
+ if v, _ := c.x.(Value); v != nil {
+ w.Conjuncts[i].x = toDataAll(v).(Value)
+ }
+ }
+ return &w
+}
+
+func toDataAll(v BaseValue) BaseValue {
+ switch x := v.(type) {
+ default:
+ return x
+
+ case *Vertex:
+ return x.ToDataAll()
+
+ // The following cases are always erroneous, but we handle them anyway
+ // to avoid issues with the closedness algorithm down the line.
+ case *Disjunction:
+ d := *x
+ d.Values = make([]*Vertex, len(x.Values))
+ for i, v := range x.Values {
+ d.Values[i] = v.ToDataAll()
+ }
+ return &d
+
+ case *Conjunction:
+ c := *x
+ c.Values = make([]Value, len(x.Values))
+ for i, v := range x.Values {
+ // This case is okay because the source is of type Value.
+ c.Values[i] = toDataAll(v).(Value)
+ }
+ return &c
+ }
+}
+
+// func (v *Vertex) IsEvaluating() bool {
+// return v.Value == cycle
+// }
+
+func (v *Vertex) IsErr() bool {
+ // if v.Status() > Evaluating {
+ if _, ok := v.BaseValue.(*Bottom); ok {
+ return true
+ }
+ // }
+ return false
+}
+
+func (v *Vertex) Err(c *OpContext, state VertexStatus) *Bottom {
+ c.Unify(v, state)
+ if b, ok := v.BaseValue.(*Bottom); ok {
+ return b
+ }
+ return nil
+}
+
+// func (v *Vertex) Evaluate()
+
+func (v *Vertex) Finalize(c *OpContext) {
+ // Saving and restoring the error context prevents v from panicking in
+ // case the caller did not handle existing errors in the context.
+ err := c.errs
+ c.errs = nil
+ c.Unify(v, Finalized)
+ c.errs = err
+}
+
+func (v *Vertex) AddErr(ctx *OpContext, b *Bottom) {
+ v.SetValue(ctx, Finalized, CombineErrors(nil, v.Value(), b))
+}
+
+func (v *Vertex) SetValue(ctx *OpContext, state VertexStatus, value BaseValue) *Bottom {
+ v.BaseValue = value
+ v.UpdateStatus(state)
+ return nil
+}
+
+// ToVertex wraps v in a new Vertex, if necessary.
+func ToVertex(v Value) *Vertex {
+ switch x := v.(type) {
+ case *Vertex:
+ return x
+ default:
+ n := &Vertex{
+ status: Finalized,
+ BaseValue: x,
+ }
+ n.AddConjunct(MakeRootConjunct(nil, v))
+ return n
+ }
+}
+
+// Unwrap returns the possibly non-concrete scalar value of v or nil if v is
+// a list, struct or of undefined type.
+func Unwrap(v Value) Value {
+ x, ok := v.(*Vertex)
+ if !ok {
+ return v
+ }
+ x = x.Indirect()
+ if n := x.state; n != nil && isCyclePlaceholder(x.BaseValue) {
+ if n.errs != nil && !n.errs.IsIncomplete() {
+ return n.errs
+ }
+ if n.scalar != nil {
+ return n.scalar
+ }
+ }
+ return x.Value()
+}
+
+// Indirect unrolls indirections of Vertex values. These may be introduced,
+// for instance, by temporary bindings such as comprehension values.
+// It returns v itself if v does not point to another Vertex.
+func (v *Vertex) Indirect() *Vertex {
+ for {
+ arc, ok := v.BaseValue.(*Vertex)
+ if !ok {
+ return v
+ }
+ v = arc
+ }
+}
+
+// OptionalType is a bit field of the type of optional constraints in use by an
+// Acceptor.
+type OptionalType int8
+
+const (
+ HasField OptionalType = 1 << iota // X: T
+ HasDynamic // (X): T or "\(X)": T
+ HasPattern // [X]: T
+ HasComplexPattern // anything but a basic type
+ HasAdditional // ...T
+ IsOpen // Defined for all fields
+)
+
+func (v *Vertex) Kind() Kind {
+ // This is possible when evaluating comprehensions. It is potentially
+ // not known at this time what the type is.
+ switch {
+ case v.state != nil:
+ return v.state.kind
+ case v.BaseValue == nil:
+ return TopKind
+ default:
+ return v.BaseValue.Kind()
+ }
+}
+
+func (v *Vertex) OptionalTypes() OptionalType {
+ var mask OptionalType
+ for _, s := range v.Structs {
+ mask |= s.OptionalTypes()
+ }
+ return mask
+}
+
+// IsOptional reports whether a field is explicitly defined as optional,
+// as opposed to whether it is allowed by a pattern constraint.
+func (v *Vertex) IsOptional(label Feature) bool {
+ for _, s := range v.Structs {
+ if s.IsOptional(label) {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *Vertex) accepts(ok, required bool) bool {
+ return ok || (!required && !v.Closed)
+}
+
+func (v *Vertex) IsClosedStruct() bool {
+ switch x := v.BaseValue.(type) {
+ default:
+ return false
+
+ case *StructMarker:
+ if x.NeedClose {
+ return true
+ }
+
+ case *Disjunction:
+ }
+ return v.Closed || isClosed(v)
+}
+
+func (v *Vertex) IsClosedList() bool {
+ if x, ok := v.BaseValue.(*ListMarker); ok {
+ return !x.IsOpen
+ }
+ return false
+}
+
+// TODO: return error instead of boolean? (or at least have version that does.)
+func (v *Vertex) Accept(ctx *OpContext, f Feature) bool {
+ if x, ok := v.BaseValue.(*Disjunction); ok {
+ for _, v := range x.Values {
+ if v.Accept(ctx, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ if f.IsInt() {
+ switch v.BaseValue.(type) {
+ case *ListMarker:
+ // TODO(perf): use precomputed length.
+ if f.Index() < len(v.Elems()) {
+ return true
+ }
+ return !v.IsClosedList()
+
+ default:
+ return v.Kind()&ListKind != 0
+ }
+ }
+
+ if k := v.Kind(); k&StructKind == 0 && f.IsString() {
+ // If the value is bottom, we may not really know if this used to
+ // be a struct.
+ if k != BottomKind || len(v.Structs) == 0 {
+ return false
+ }
+ }
+
+ if f.IsHidden() || !v.IsClosedStruct() || v.Lookup(f) != nil {
+ return true
+ }
+
+ // TODO(perf): collect positions in error.
+ defer ctx.ReleasePositions(ctx.MarkPositions())
+
+ return v.accepts(Accept(ctx, v, f))
+}
+
+// MatchAndInsert finds the conjuncts for optional fields, pattern
+// constraints, and additional constraints that match f and inserts them in
+// arc. Use an f of 0 to match all additional constraints only.
+func (v *Vertex) MatchAndInsert(ctx *OpContext, arc *Vertex) {
+ if !v.Accept(ctx, arc.Label) {
+ return
+ }
+
+ // Go backwards to simulate old implementation.
+ for i := len(v.Structs) - 1; i >= 0; i-- {
+ s := v.Structs[i]
+ if s.Disable {
+ continue
+ }
+ s.MatchAndInsert(ctx, arc)
+ }
+}
+
+func (v *Vertex) IsList() bool {
+ _, ok := v.BaseValue.(*ListMarker)
+ return ok
+}
+
+// Lookup returns the Arc with label f if it exists or nil otherwise.
+func (v *Vertex) Lookup(f Feature) *Vertex {
+ for _, a := range v.Arcs {
+ if a.Label == f {
+ a = a.Indirect()
+ return a
+ }
+ }
+ return nil
+}
+
+// Elems returns the regular elements of a list.
+func (v *Vertex) Elems() []*Vertex {
+ // TODO: add bookkeeping for where list arcs start and end.
+ a := make([]*Vertex, 0, len(v.Arcs))
+ for _, x := range v.Arcs {
+ if x.Label.IsInt() {
+ a = append(a, x)
+ }
+ }
+ return a
+}
+
+// GetArc returns a Vertex for the outgoing arc with label f. It creates and
+// adds one if it doesn't yet exist.
+func (v *Vertex) GetArc(c *OpContext, f Feature) (arc *Vertex, isNew bool) {
+ arc = v.Lookup(f)
+ if arc == nil {
+ for _, a := range v.state.usedArcs {
+ if a.Label == f {
+ arc = a
+ v.Arcs = append(v.Arcs, arc)
+ isNew = true
+ if c.nonMonotonicInsertNest > 0 {
+ a.nonMonotonicInsertGen = c.nonMonotonicGeneration
+ }
+ break
+ }
+ }
+ }
+ if arc == nil {
+ arc = &Vertex{Parent: v, Label: f}
+ v.Arcs = append(v.Arcs, arc)
+ isNew = true
+ if c.nonMonotonicInsertNest > 0 {
+ arc.nonMonotonicInsertGen = c.nonMonotonicGeneration
+ }
+ }
+ if c.nonMonotonicInsertNest == 0 {
+ arc.nonMonotonicInsertGen = 0
+ }
+ return arc, isNew
+}
+
+func (v *Vertex) Source() ast.Node {
+ if v != nil {
+ if b, ok := v.BaseValue.(Value); ok {
+ return b.Source()
+ }
+ }
+ return nil
+}
+
+// AddConjunct adds the given Conjuncts to v if it doesn't already exist.
+func (v *Vertex) AddConjunct(c Conjunct) *Bottom {
+ if v.BaseValue != nil {
+ // TODO: investigate why this happens at all. Removing it seems to
+ // change the order of fields in some cases.
+ //
+ // This is likely a bug in the evaluator and should not happen.
+ return &Bottom{Err: errors.Newf(token.NoPos, "cannot add conjunct")}
+ }
+ v.addConjunct(c)
+ return nil
+}
+
+func (v *Vertex) addConjunct(c Conjunct) {
+ for _, x := range v.Conjuncts {
+ if x == c {
+ return
+ }
+ }
+ v.Conjuncts = append(v.Conjuncts, c)
+}
+
+func (v *Vertex) AddStruct(s *StructLit, env *Environment, ci CloseInfo) *StructInfo {
+ info := StructInfo{
+ StructLit: s,
+ Env: env,
+ CloseInfo: ci,
+ }
+ for _, t := range v.Structs {
+ if *t == info {
+ return t
+ }
+ }
+ t := &info
+ v.Structs = append(v.Structs, t)
+ return t
+}
+
+// Path computes the sequence of Features leading from the root to of the
+// instance to this Vertex.
+//
+// NOTE: this is for debugging purposes only.
+func (v *Vertex) Path() []Feature {
+ return appendPath(nil, v)
+}
+
+func appendPath(a []Feature, v *Vertex) []Feature {
+ if v.Parent == nil {
+ return a
+ }
+ a = appendPath(a, v.Parent)
+ if v.Label != 0 {
+ // A Label may be 0 for programmatically inserted nodes.
+ a = append(a, v.Label)
+ }
+ return a
+}
+
+// A Conjunct is an Environment-Expr pair. The Environment is the starting
+// point for reference lookup for any reference contained in X.
+type Conjunct struct {
+ Env *Environment
+ x Node
+
+ // CloseInfo is a unique number that tracks a group of conjuncts that need
+	// to belong to a single originating definition.
+ CloseInfo CloseInfo
+}
+
+// TODO(perf): replace with composite literal if this helps performance.
+
+// MakeRootConjunct creates a conjunct from the given environment and node.
+// It panics if x cannot be used as an expression.
+func MakeRootConjunct(env *Environment, x Node) Conjunct {
+ return MakeConjunct(env, x, CloseInfo{})
+}
+
+func MakeConjunct(env *Environment, x Node, id CloseInfo) Conjunct {
+ if env == nil {
+ // TODO: better is to pass one.
+ env = &Environment{}
+ }
+ switch x.(type) {
+ case Elem, interface{ expr() Expr }:
+ default:
+ panic(fmt.Sprintf("invalid Node type %T", x))
+ }
+ return Conjunct{env, x, id}
+}
+
+func (c *Conjunct) Source() ast.Node {
+ return c.x.Source()
+}
+
+func (c *Conjunct) Field() Node {
+ return c.x
+}
+
+// Elem retrieves the Elem form of the contained conjunct.
+// If it is a Field, it will return the field value.
+func (c *Conjunct) Elem() Elem {
+ switch x := c.x.(type) {
+ case interface{ expr() Expr }:
+ return x.expr()
+ case Elem:
+ return x
+ default:
+ panic("unreachable")
+ }
+}
+
+// Expr retrieves the expression form of the contained conjunct.
+// If it is a field or comprehension, it will return its associated value.
+func (c *Conjunct) Expr() Expr {
+ switch x := c.x.(type) {
+ case Expr:
+ return x
+ // TODO: comprehension.
+ case interface{ expr() Expr }:
+ return x.expr()
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/comprehension.go b/vendor/cuelang.org/go/internal/core/adt/comprehension.go
new file mode 100644
index 0000000000..804dc39579
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/comprehension.go
@@ -0,0 +1,70 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+type envYield struct {
+ comp *Comprehension
+ env *Environment
+ id CloseInfo
+ err *Bottom
+}
+
+func (n *nodeContext) insertComprehension(env *Environment, x *Comprehension, ci CloseInfo) {
+ n.comprehensions = append(n.comprehensions, envYield{x, env, ci, nil})
+}
+
+// injectComprehensions evaluates and inserts comprehensions.
+func (n *nodeContext) injectComprehensions(all *[]envYield) (progress bool) {
+ ctx := n.ctx
+
+ k := 0
+ for i := 0; i < len(*all); i++ {
+ d := (*all)[i]
+
+ sa := []*Environment{}
+ f := func(env *Environment) {
+ sa = append(sa, env)
+ }
+
+ if err := ctx.Yield(d.env, d.comp, f); err != nil {
+ if err.IsIncomplete() {
+ d.err = err
+ (*all)[k] = d
+ k++
+ } else {
+ // continue to collect other errors.
+ n.addBottom(err)
+ }
+ continue
+ }
+
+ if len(sa) == 0 {
+ continue
+ }
+ id := d.id.SpawnSpan(d.comp.Clauses, ComprehensionSpan)
+
+ n.ctx.nonMonotonicInsertNest++
+ for _, env := range sa {
+ n.addExprConjunct(Conjunct{env, d.comp.Value, id})
+ }
+ n.ctx.nonMonotonicInsertNest--
+ }
+
+ progress = k < len(*all)
+
+ *all = (*all)[:k]
+
+ return progress
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/context.go b/vendor/cuelang.org/go/internal/core/adt/context.go
new file mode 100644
index 0000000000..e7a6412beb
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/context.go
@@ -0,0 +1,1283 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/cockroachdb/apd/v2"
+ "golang.org/x/text/encoding/unicode"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// Debug sets whether extra aggressive checking should be done.
+// This should typically default to true for pre-releases and default to
+// false otherwise.
+var Debug bool = os.Getenv("CUE_DEBUG") != "0"
+
+// Verbosity sets the log level. There are currently only two levels:
+// 0: no logging
+// 1: logging
+var Verbosity int
+
+// DebugSort specifies that arcs be sorted consistently between implementations.
+// 0: default
+// 1: sort by Feature: this should be consistent between implementations where
+// there is no change in the compiler and indexing code.
+// 2: alphabetical
+var DebugSort int
+
+func DebugSortArcs(c *OpContext, n *Vertex) {
+ if n.IsList() {
+ return
+ }
+ switch a := n.Arcs; DebugSort {
+ case 1:
+ sort.SliceStable(a, func(i, j int) bool {
+ return a[i].Label < a[j].Label
+ })
+ case 2:
+ sort.SliceStable(a, func(i, j int) bool {
+ return a[i].Label.SelectorString(c.Runtime) <
+ a[j].Label.SelectorString(c.Runtime)
+ })
+ }
+}
+
+func DebugSortFields(c *OpContext, a []Feature) {
+ switch DebugSort {
+ case 1:
+ sort.SliceStable(a, func(i, j int) bool {
+ return a[i] < a[j]
+ })
+ case 2:
+ sort.SliceStable(a, func(i, j int) bool {
+ return a[i].SelectorString(c.Runtime) <
+ a[j].SelectorString(c.Runtime)
+ })
+ }
+}
+
+// Assertf panics if the condition is false. Assertf can be used to check for
+// conditions that are considered to break an internal invariant or unexpected
+// condition, but that nonetheless probably will be handled correctly down the
+// line. For instance, a faulty condition could lead to an error being caught
+// down the road, but resulting in an inaccurate error message. In production
+// code it is better to deal with the bad error message than to panic.
+//
+// It is advisable for each use of Assert to document how the error is expected
+// to be handled down the line.
+func Assertf(b bool, format string, args ...interface{}) {
+ if Debug && !b {
+ panic(fmt.Sprintf("assertion failed: "+format, args...))
+ }
+}
+
+// Assertf either panics or reports an error to c if the condition is not met.
+func (c *OpContext) Assertf(pos token.Pos, b bool, format string, args ...interface{}) {
+ if !b {
+ if Debug {
+ panic(fmt.Sprintf("assertion failed: "+format, args...))
+ }
+ c.addErrf(0, pos, format, args...)
+ }
+}
+
+func init() {
+ log.SetFlags(log.Lshortfile)
+}
+
+func Logf(format string, args ...interface{}) {
+ if Verbosity == 0 {
+ return
+ }
+ s := fmt.Sprintf(format, args...)
+ _ = log.Output(2, s)
+}
+
+var pMap = map[*Vertex]int{}
+
+func (c *OpContext) Logf(v *Vertex, format string, args ...interface{}) {
+ if Verbosity == 0 {
+ return
+ }
+ if v == nil {
+ s := fmt.Sprintf(strings.Repeat("..", c.nest)+format, args...)
+ _ = log.Output(2, s)
+ return
+ }
+ p := pMap[v]
+ if p == 0 {
+ p = len(pMap) + 1
+ pMap[v] = p
+ }
+ a := append([]interface{}{
+ strings.Repeat("..", c.nest),
+ p,
+ v.Label.SelectorString(c),
+ v.Path(),
+ }, args...)
+ for i := 2; i < len(a); i++ {
+ switch x := a[i].(type) {
+ case Node:
+ a[i] = c.Str(x)
+ case Feature:
+ a[i] = x.SelectorString(c)
+ }
+ }
+ s := fmt.Sprintf("%s [%d] %s/%v"+format, a...)
+ _ = log.Output(2, s)
+}
+
+// Runtime defines an interface for low-level representation conversion and
+// lookup.
+type Runtime interface {
+ // StringIndexer allows for converting string labels to and from a
+ // canonical numeric representation.
+ StringIndexer
+
+ // LoadImport loads a unique Vertex associated with a given import path. It
+ // returns nil if no import for this package could be found.
+ LoadImport(importPath string) *Vertex
+
+ // StoreType associates a CUE expression with a Go type.
+ StoreType(t reflect.Type, src ast.Expr, expr Expr)
+
+ // LoadType retrieves a previously stored CUE expression for a given Go
+ // type if available.
+ LoadType(t reflect.Type) (src ast.Expr, expr Expr, ok bool)
+}
+
+type Config struct {
+ Runtime
+ Format func(Node) string
+}
+
+// New creates an operation context.
+func New(v *Vertex, cfg *Config) *OpContext {
+ if cfg.Runtime == nil {
+ panic("nil Runtime")
+ }
+ ctx := &OpContext{
+ Runtime: cfg.Runtime,
+ Format: cfg.Format,
+ vertex: v,
+ }
+ if v != nil {
+ ctx.e = &Environment{Up: nil, Vertex: v}
+ }
+ return ctx
+}
+
+// An OpContext implements CUE's unification operation. Its operations only
+// operate on values that are created with the Runtime with which an OpContext
+// is associated. An OpContext is not goroutine safe and only one goroutine may
+// use an OpContext at a time.
+//
+type OpContext struct {
+ Runtime
+ Format func(Node) string
+
+ nest int
+
+ stats Stats
+ freeListNode *nodeContext
+
+ e *Environment
+ src ast.Node
+ errs *Bottom
+ positions []Node // keep track of error positions
+
+ // vertex is used to determine the path location in case of error. Turning
+ // this into a stack could also allow determining the cyclic path for
+ // structural cycle errors.
+ vertex *Vertex
+
+ nonMonotonicLookupNest int32
+ nonMonotonicRejectNest int32
+ nonMonotonicInsertNest int32
+ nonMonotonicGeneration int32
+
+ // These fields are used associate scratch fields for computing closedness
+ // of a Vertex. These fields could have been included in StructInfo (like
+ // Tomabechi's unification algorithm), but we opted for an indirection to
+ // allow concurrent unification.
+ //
+ // TODO(perf): have two generations: one for each pass of the closedness
+ // algorithm, so that the results of the first pass can be reused for all
+ // features of a node.
+ generation int
+ closed map[*closeInfo]*closeStats
+ todo *closeStats
+
+ // inDisjunct indicates that non-monotonic checks should be skipped.
+ // This is used if we want to do some extra work to eliminate disjunctions
+	// early. The result of unification should be thrown away if this check is
+ // used.
+ //
+ // TODO: replace this with a mechanism to determine the correct set (per
+ // conjunct) of StructInfos to include in closedness checking.
+ inDisjunct int
+
+	// inConstraint overrides inDisjunct as field matching should always be
+ // enabled.
+ inConstraint int
+}
+
+func (n *nodeContext) skipNonMonotonicChecks() bool {
+ if n.ctx.inConstraint > 0 {
+ return false
+ }
+ return n.ctx.inDisjunct > 0
+}
+
+// Impl is for internal use only. This will go.
+func (c *OpContext) Impl() Runtime {
+	return c.Runtime
+}
+
+// Pos reports the position of the current source node, or token.NoPos if
+// there is no current source.
+func (c *OpContext) Pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+// Source returns the source node currently being processed.
+func (c *OpContext) Source() ast.Node {
+	return c.src
+}
+
+// NewContext creates an operation context.
+func NewContext(r Runtime, v *Vertex) *OpContext {
+	return New(v, &Config{Runtime: r})
+}
+
+// pos is the unexported equivalent of Pos.
+func (c *OpContext) pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+// spawn creates a child Environment for node, linked to the current
+// environment and inheriting its cycle-tracking data.
+func (c *OpContext) spawn(node *Vertex) *Environment {
+	node.Parent = c.e.Vertex // TODO: Is this necessary?
+	return &Environment{
+		Up:     c.e,
+		Vertex: node,
+
+		// Copy cycle data.
+		Cyclic: c.e.Cyclic,
+		Deref:  c.e.Deref,
+		Cycles: c.e.Cycles,
+	}
+}
+
+// Env returns the Environment upCount levels above the current one.
+func (c *OpContext) Env(upCount int32) *Environment {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e
+}
+
+// relNode returns the Vertex of the environment upCount levels up, unified
+// at least to the Partial state.
+func (c *OpContext) relNode(upCount int32) *Vertex {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	c.Unify(e.Vertex, Partial)
+	return e.Vertex
+}
+
+// relLabel returns the dynamic label of the environment upCount levels up.
+func (c *OpContext) relLabel(upCount int32) Feature {
+	// locate current label.
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e.DynamicLabel
+}
+
+// concreteIsPossible reports whether x could ever evaluate to a concrete
+// value for use as an operand of op, recording an error if it cannot.
+func (c *OpContext) concreteIsPossible(op Op, x Expr) bool {
+	if !AssertConcreteIsPossible(op, x) {
+		// No need to take position of expression.
+		c.AddErr(c.NewPosf(token.NoPos,
+			"invalid operand %s ('%s' requires concrete value)", x, op))
+		return false
+	}
+	return true
+}
+
+// AssertConcreteIsPossible reports whether the given expression can evaluate
+// to a concrete value. Bound expressions can never be concrete; other values
+// are checked for concreteness directly.
+func AssertConcreteIsPossible(op Op, x Expr) bool {
+	switch v := x.(type) {
+	case *Bottom:
+	case *BoundExpr:
+		return false
+	case Value:
+		return v.Concreteness() == Concrete
+	}
+	return true
+}
+
+// HasErr reports whether any error was reported, including whether value
+// was incomplete.
+func (c *OpContext) HasErr() bool {
+	return c.errs != nil
+}
+
+// Err returns the errors accumulated so far and resets the error state.
+func (c *OpContext) Err() *Bottom {
+	b := c.errs
+	c.errs = nil
+	return b
+}
+
+// addErrf records a formatted error with the given code and position.
+func (c *OpContext) addErrf(code ErrorCode, pos token.Pos, msg string, args ...interface{}) {
+	err := c.NewPosf(pos, msg, args...)
+	c.addErr(code, err)
+}
+
+// addErr records err as a *Bottom with the given error code.
+func (c *OpContext) addErr(code ErrorCode, err errors.Error) {
+	c.AddBottom(&Bottom{Code: code, Err: err})
+}
+
+// AddBottom records an error in OpContext.
+func (c *OpContext) AddBottom(b *Bottom) {
+	c.errs = CombineErrors(c.src, c.errs, b)
+}
+
+// AddErr records an error in OpContext. It returns errors collected so far.
+func (c *OpContext) AddErr(err errors.Error) *Bottom {
+	if err != nil {
+		c.AddBottom(&Bottom{Err: err})
+	}
+	return c.errs
+}
+
+// NewErrf creates a *Bottom value and returns it. The returned *Bottom uses
+// the current source as the point of origin of the error.
+func (c *OpContext) NewErrf(format string, args ...interface{}) *Bottom {
+	// TODO: consider renaming to NewBottomf: this is now confusing as we also
+	// have Newf.
+	err := c.Newf(format, args...)
+	return &Bottom{Src: c.src, Err: err, Code: EvalError}
+}
+
+// AddErrf records an error in OpContext. It returns errors collected so far.
+func (c *OpContext) AddErrf(format string, args ...interface{}) *Bottom {
+	return c.AddErr(c.Newf(format, args...))
+}
+
+// frame captures the environment, error, and source state of an OpContext so
+// that it can be restored after evaluating in a different environment.
+type frame struct {
+	env *Environment
+	err *Bottom
+	src ast.Node
+}
+
+// PushState saves the current environment, errors, and source and installs
+// env (and src, if non-nil) as the current state. The returned frame must be
+// passed to PopState to restore the previous state.
+func (c *OpContext) PushState(env *Environment, src ast.Node) (saved frame) {
+	saved.env = c.e
+	saved.err = c.errs
+	saved.src = c.src
+
+	c.errs = nil
+	if src != nil {
+		c.src = src
+	}
+	c.e = env
+
+	return saved
+}
+
+// PopState restores the state saved by the corresponding PushState and
+// returns the errors accumulated in between.
+func (c *OpContext) PopState(s frame) *Bottom {
+	err := c.errs
+	c.e = s.env
+	c.errs = s.err
+	c.src = s.src
+	return err
+}
+
+// PushArc signals c that arc v is currently being processed for the purpose
+// of error reporting. PopArc should be called with the returned value once
+// processing of v is completed.
+func (c *OpContext) PushArc(v *Vertex) (saved *Vertex) {
+	c.vertex, saved = v, c.vertex
+	return saved
+}
+
+// PopArc signals completion of processing the current arc.
+func (c *OpContext) PopArc(saved *Vertex) {
+	c.vertex = saved
+}
+
+// Resolve finds a node in the tree.
+//
+// Should only be used to insert Conjuncts. TODO: perhaps only return Conjuncts
+// and error.
+func (c *OpContext) Resolve(env *Environment, r Resolver) (*Vertex, *Bottom) {
+	s := c.PushState(env, r.Source())
+
+	arc := r.resolve(c, Partial)
+
+	err := c.PopState(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// A structural cycle in a child invalidates the resolved arc as a whole.
+	if arc.ChildErrors != nil && arc.ChildErrors.Code == StructuralCycleError {
+		return nil, arc.ChildErrors
+	}
+
+	arc = arc.Indirect()
+
+	return arc, err
+}
+
+// Validate calls validates value for the given validator.
+//
+// TODO(errors): return boolean instead: only the caller has enough information
+// to generate a proper error message.
+func (c *OpContext) Validate(check Validator, value Value) *Bottom {
+	// TODO: use a position stack to push both values.
+	saved := c.src
+	c.src = check.Source()
+
+	err := check.validate(c, value)
+
+	c.src = saved
+
+	return err
+}
+
+// Yield evaluates a Yielder and calls f for each result.
+func (c *OpContext) Yield(env *Environment, comp *Comprehension, f YieldFunc) *Bottom {
+	y := comp.Clauses
+
+	s := c.PushState(env, y.Source())
+
+	y.yield(c, f)
+
+	return c.PopState(s)
+
+}
+
+// Concrete returns the concrete value of x after evaluating it.
+// msg is used to mention the context in which an error occurred, if any.
+func (c *OpContext) Concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) {
+
+	w, complete := c.Evaluate(env, x)
+
+	// Resolve any disjunction to its default; failure was already reported.
+	w, ok := c.getDefault(w)
+	if !ok {
+		return w, false
+	}
+	v := Unwrap(w)
+
+	if !IsConcrete(v) {
+		complete = false
+		b := c.NewErrf("non-concrete value %v in operand to %s", w, msg)
+		b.Code = IncompleteError
+		v = b
+	}
+
+	if !complete {
+		return v, complete
+	}
+
+	return v, true
+}
+
+// getDefault resolves a disjunction to a single value. If there is no default
+// value, or if there is more than one default value, it reports an "incomplete"
+// error and returns false. In all other cases it will return true, even if
+// v is already an error. v may be nil, in which case it will also return nil.
+func (c *OpContext) getDefault(v Value) (result Value, ok bool) {
+	var d *Disjunction
+	switch x := v.(type) {
+	default:
+		return v, true
+
+	case *Vertex:
+		// TODO: return vertex if not disjunction.
+		switch t := x.BaseValue.(type) {
+		case *Disjunction:
+			d = t
+
+		case *Vertex:
+			return c.getDefault(t)
+
+		default:
+			return x, true
+		}
+
+	case *Disjunction:
+		d = x
+	}
+
+	if d.NumDefaults != 1 {
+		c.addErrf(IncompleteError, c.pos(),
+			"unresolved disjunction %s (type %s)", d, d.Kind())
+		return nil, false
+	}
+	// Recurse: the default may itself be a disjunction.
+	return c.getDefault(d.Values[0])
+}
+
+// Evaluate evaluates an expression within the given environment and indicates
+// whether the result is complete. It will always return a non-nil result.
+func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) {
+	s := c.PushState(env, x.Source())
+
+	val := c.evalState(x, Partial)
+
+	complete = true
+
+	if err, _ := val.(*Bottom); err != nil && err.IsIncomplete() {
+		complete = false
+	}
+	if val == nil {
+		complete = false
+		// TODO: ensure this does not happen.
+		val = &Bottom{
+			Code: IncompleteError,
+			Err:  c.Newf("UNANTICIPATED ERROR"),
+		}
+
+	}
+
+	_ = c.PopState(s)
+
+	if !complete || val == nil {
+		return val, false
+	}
+
+	return val, true
+}
+
+// evaluateRec evaluates x within env at the given state. Like Evaluate, it
+// always returns a non-nil value.
+func (c *OpContext) evaluateRec(env *Environment, x Expr, state VertexStatus) Value {
+	s := c.PushState(env, x.Source())
+
+	val := c.evalState(x, state)
+	if val == nil {
+		// Be defensive: this never happens, but just in case.
+		Assertf(false, "nil return value: unspecified error")
+		val = &Bottom{
+			Code: IncompleteError,
+			Err:  c.Newf("UNANTICIPATED ERROR"),
+		}
+	}
+	_ = c.PopState(s)
+
+	return val
+}
+
+// value evaluates expression v within the current environment. The result may
+// be nil if the result is incomplete. value leaves errors untouched so that
+// they can be collected by the caller.
+func (c *OpContext) value(x Expr) (result Value) {
+	v := c.evalState(x, Partial)
+
+	// Resolve defaults and unwrap the underlying scalar, if any.
+	v, _ = c.getDefault(v)
+	v = Unwrap(v)
+	return v
+}
+
+// evalState evaluates expression v, resolving references at least to the
+// given state. Errors accumulated during evaluation are combined with any
+// preexisting errors and folded into the result.
+func (c *OpContext) evalState(v Expr, state VertexStatus) (result Value) {
+	savedSrc := c.src
+	c.src = v.Source()
+	err := c.errs
+	c.errs = nil
+
+	defer func() {
+		c.errs = CombineErrors(c.src, c.errs, err)
+
+		// If the result is a Vertex holding an error, surface that error as
+		// the result, except for incomplete errors and — in Partial state —
+		// cycle errors, which may still resolve.
+		if v, ok := result.(*Vertex); ok {
+			if b, _ := v.BaseValue.(*Bottom); b != nil {
+				switch b.Code {
+				case IncompleteError:
+				case CycleError:
+					if state == Partial {
+						break
+					}
+					fallthrough
+				default:
+					result = b
+				}
+			}
+		}
+
+		// TODO: remove this when we handle errors more principally.
+		if b, ok := result.(*Bottom); ok {
+			// Attach the current source position to cycle errors that have
+			// no position of their own.
+			if c.src != nil &&
+				b.Code == CycleError &&
+				len(errors.Positions(b.Err)) == 0 {
+				bb := *b
+				bb.Err = errors.Wrapf(b.Err, c.src.Pos(), "")
+				result = &bb
+			}
+			if c.errs != result {
+				c.errs = CombineErrors(c.src, c.errs, result)
+			}
+		}
+		if c.errs != nil {
+			result = c.errs
+		}
+		c.src = savedSrc
+	}()
+
+	switch x := v.(type) {
+	case Value:
+		return x
+
+	case Evaluator:
+		v := x.evaluate(c)
+		return v
+
+	case Resolver:
+		arc := x.resolve(c, state)
+		if c.HasErr() {
+			return nil
+		}
+		if arc == nil {
+			return nil
+		}
+
+		v := c.evaluate(arc, state)
+		return v
+
+	default:
+		// This can only happen, really, if v == nil, which is not allowed.
+		panic(fmt.Sprintf("unexpected Expr type %T", v))
+	}
+}
+
+// unifyNode returns a possibly partially evaluated node value.
+//
+// Unlike evalState, resolved nodes are returned as is (rather than being
+// evaluated further), after being unified to at least the AllArcs state.
+//
+// TODO: maybe return *Vertex, *Bottom
+//
+func (c *OpContext) unifyNode(v Expr, state VertexStatus) (result Value) {
+	savedSrc := c.src
+	c.src = v.Source()
+	err := c.errs
+	c.errs = nil
+
+	defer func() {
+		c.errs = CombineErrors(c.src, c.errs, err)
+
+		// Surface an error stored in a Vertex, except for incomplete errors
+		// and — in Partial state — cycle errors, which may still resolve.
+		if v, ok := result.(*Vertex); ok {
+			if b, _ := v.BaseValue.(*Bottom); b != nil {
+				switch b.Code {
+				case IncompleteError:
+				case CycleError:
+					if state == Partial {
+						break
+					}
+					fallthrough
+				default:
+					result = b
+				}
+			}
+		}
+
+		// TODO: remove this when we handle errors more principally.
+		if b, ok := result.(*Bottom); ok {
+			// Attach the current source position to cycle errors that carry
+			// no position information at all.
+			if c.src != nil &&
+				b.Code == CycleError &&
+				b.Err.Position() == token.NoPos &&
+				len(b.Err.InputPositions()) == 0 {
+				bb := *b
+				bb.Err = errors.Wrapf(b.Err, c.src.Pos(), "")
+				result = &bb
+			}
+			c.errs = CombineErrors(c.src, c.errs, result)
+		}
+		if c.errs != nil {
+			result = c.errs
+		}
+		c.src = savedSrc
+	}()
+
+	switch x := v.(type) {
+	case Value:
+		return x
+
+	case Evaluator:
+		v := x.evaluate(c)
+		return v
+
+	case Resolver:
+		v := x.resolve(c, state)
+		if c.HasErr() {
+			return nil
+		}
+		if v == nil {
+			return nil
+		}
+
+		if v.isUndefined() || state > v.status {
+			// Keep a minimum state of AllArcs.
+			// TODO: AllArcs may still not be achieved if a node is currently
+			// evaluating.
+			state := state
+			if state < AllArcs {
+				state = AllArcs
+			}
+			// Use node itself to allow for cycle detection.
+			c.Unify(v, state)
+		}
+
+		return v
+
+	default:
+		// This can only happen, really, if v == nil, which is not allowed.
+		panic(fmt.Sprintf("unexpected Expr type %T", v))
+	}
+}
+
+// lookup returns the arc of x with label l, unified at least to the given
+// state. It validates that l is a legal selector for the kind of x, records
+// bookkeeping for non-monotonic lookups, and reports an error (returning nil)
+// if the field does not exist or cannot be referenced.
+func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state VertexStatus) *Vertex {
+	if l == InvalidLabel || x == nil {
+		// TODO: is it possible to have an invalid label here? Maybe through the
+		// API?
+		return &Vertex{}
+	}
+
+	// var kind Kind
+	// if x.BaseValue != nil {
+	// 	kind = x.BaseValue.Kind()
+	// }
+
+	// Reject selectors that are incompatible with the kind of x.
+	switch x.BaseValue.(type) {
+	case *StructMarker:
+		if l.Typ() == IntLabel {
+			c.addErrf(0, pos, "invalid struct selector %s (type int)", l)
+			return nil
+		}
+
+	case *ListMarker:
+		switch {
+		case l.Typ() == IntLabel:
+			switch {
+			case l.Index() < 0:
+				c.addErrf(0, pos, "invalid list index %s (index must be non-negative)", l)
+				return nil
+			case l.Index() > len(x.Arcs):
+				c.addErrf(0, pos, "invalid list index %s (out of bounds)", l)
+				return nil
+			}
+
+		case l.IsDef(), l.IsHidden():
+
+		default:
+			c.addErrf(0, pos, "invalid list index %s (type string)", l)
+			return nil
+		}
+
+	case nil:
+		// c.addErrf(IncompleteError, pos, "incomplete value %s", x)
+		// return nil
+
+	case *Bottom:
+
+	default:
+		kind := x.BaseValue.Kind()
+		if kind&(ListKind|StructKind) != 0 {
+			// c.addErrf(IncompleteError, pos,
+			// 	"cannot look up %s in incomplete type %s (type %s)",
+			// 	l, x.Source(), kind)
+			// return nil
+		} else if !l.IsDef() && !l.IsHidden() {
+			c.addErrf(0, pos,
+				"invalid selector %s for value of type %s", l, kind)
+			return nil
+		}
+	}
+
+	a := x.Lookup(l)
+	if a != nil {
+		a = a.Indirect()
+	}
+
+	// Track non-monotonic lookups so that speculative disjunct elimination
+	// can detect fields that appear after they were looked up.
+	var hasCycle bool
+outer:
+	switch {
+	case c.nonMonotonicLookupNest == 0 && c.nonMonotonicRejectNest == 0:
+	case a != nil:
+		if state == Partial {
+			a.nonMonotonicLookupGen = c.nonMonotonicGeneration
+		}
+
+	case x.state != nil && state == Partial:
+		for _, e := range x.state.exprs {
+			if isCyclePlaceholder(e.err) {
+				hasCycle = true
+			}
+		}
+		for _, a := range x.state.usedArcs {
+			if a.Label == l {
+				a.nonMonotonicLookupGen = c.nonMonotonicGeneration
+				if c.nonMonotonicRejectNest > 0 {
+					a.nonMonotonicReject = true
+				}
+				break outer
+			}
+		}
+		// Record a placeholder arc for the failed lookup.
+		a := &Vertex{Label: l, nonMonotonicLookupGen: c.nonMonotonicGeneration}
+		if c.nonMonotonicRejectNest > 0 {
+			a.nonMonotonicReject = true
+		}
+		x.state.usedArcs = append(x.state.usedArcs, a)
+	}
+
+	if a != nil && state > a.status {
+		c.Unify(a, state)
+	}
+
+	if a == nil {
+		if x.state != nil {
+			for _, e := range x.state.exprs {
+				if isCyclePlaceholder(e.err) {
+					hasCycle = true
+				}
+			}
+		}
+		// Pick the error code: permanent (0 upgraded below) if the field is
+		// not allowed at all, cycle if a cycle may still produce it, and
+		// incomplete otherwise.
+		code := IncompleteError
+		if !x.Accept(c, l) {
+			code = 0
+		} else if hasCycle {
+			code = CycleError
+		}
+		// TODO: if the struct was a literal struct, we can also treat it as
+		// closed and make this a permanent error.
+		label := l.SelectorString(c.Runtime)
+
+		// TODO(errors): add path reference and make message
+		// "undefined field %s in %s"
+		if l.IsInt() {
+			c.addErrf(code, pos, "index out of range [%d] with length %d",
+				l.Index(), len(x.Elems()))
+		} else {
+			if code != 0 && x.IsOptional(l) {
+				c.addErrf(code, pos,
+					"cannot reference optional field: %s", label)
+			} else {
+				c.addErrf(code, pos, "undefined field: %s", label)
+			}
+		}
+	}
+	return a
+}
+
+// Label converts value x to a Feature, using src for error reporting.
+func (c *OpContext) Label(src Expr, x Value) Feature {
+	return labelFromValue(c, src, x)
+}
+
+// typeError records a type error for v: an incomplete error if v could still
+// become a value of kind k, and a regular error otherwise. Existing errors
+// are left as is.
+func (c *OpContext) typeError(v Value, k Kind) {
+	if isError(v) {
+		return
+	}
+	if !IsConcrete(v) && v.Kind()&k != 0 {
+		c.addErrf(IncompleteError, pos(v), "incomplete %s: %s", k, v)
+	} else {
+		c.AddErrf("cannot use %s (type %s) as type %s", v, v.Kind(), k)
+	}
+}
+
+// typeErrorAs is like typeError, but mentions the context as in the message.
+func (c *OpContext) typeErrorAs(v Value, k Kind, as interface{}) {
+	if as == nil {
+		c.typeError(v, k)
+		return
+	}
+	if isError(v) {
+		return
+	}
+	if !IsConcrete(v) && v.Kind()&k != 0 {
+		c.addErrf(IncompleteError, pos(v),
+			"incomplete %s in %v: %s", k, as, v)
+	} else {
+		c.AddErrf("cannot use %s (type %s) as type %s in %v", v, v.Kind(), k, as)
+	}
+}
+
+// emptyNode is returned as a sentinel result for failed node lookups.
+var emptyNode = &Vertex{}
+
+// pos returns the source position of x, or token.NoPos if x has no source.
+func pos(x Node) token.Pos {
+	if x.Source() == nil {
+		return token.NoPos
+	}
+	return x.Source().Pos()
+}
+
+// node evaluates x to a *Vertex suitable for indexing or iteration, resolving
+// defaults and, if scalar is set, unwrapping to the underlying value. orig is
+// the original expression and is used in error messages. It records an error
+// and returns emptyNode if x does not (yet) evaluate to a list or struct.
+func (c *OpContext) node(orig Node, x Expr, scalar bool, state VertexStatus) *Vertex {
+	// TODO: always get the vertex. This allows a whole bunch of trickery
+	// down the line.
+	v := c.unifyNode(x, state)
+
+	v, ok := c.getDefault(v)
+	if !ok {
+		// Error already generated by getDefault.
+		return emptyNode
+	}
+
+	// The two if blocks below are rather subtle. If we have an error of
+	// the sentinel value cycle, we have earlier determined that the cycle is
+	// allowed and that it can be ignored here. Any other CycleError is an
+	// annotated cycle error that could be taken as is.
+	// TODO: do something simpler.
+	if scalar {
+		if w := Unwrap(v); !isCyclePlaceholder(w) {
+			v = w
+		}
+	}
+
+	node, ok := v.(*Vertex)
+	if ok && !isCyclePlaceholder(node.BaseValue) {
+		v = node.Value()
+	}
+
+	switch nv := v.(type) {
+	case nil:
+		// A nil value is incomplete; phrase the error per usage context.
+		switch orig.(type) {
+		case *ForClause:
+			c.addErrf(IncompleteError, pos(x),
+				"cannot range over %s (incomplete)", x)
+		default:
+			c.addErrf(IncompleteError, pos(x),
+				"%s undefined (%s is incomplete)", orig, x)
+		}
+		return emptyNode
+
+	case *Bottom:
+		// TODO: this is a bit messy. In some cases errors are already added
+		// and in some cases not. Not a huge deal, as errors will be uniqued
+		// down the line, but could be better.
+		c.AddBottom(nv)
+		return emptyNode
+
+	case *Vertex:
+		if node == nil {
+			panic("unexpected markers with nil node")
+		}
+
+	default:
+		if kind := v.Kind(); kind&StructKind != 0 {
+			switch orig.(type) {
+			case *ForClause:
+				c.addErrf(IncompleteError, pos(x),
+					"cannot range over %s (incomplete type %s)", x, kind)
+			default:
+				c.addErrf(IncompleteError, pos(x),
+					"%s undefined as %s is incomplete (type %s)", orig, x, kind)
+			}
+			return emptyNode
+
+		} else if !ok {
+			c.addErrf(0, pos(x), // TODO(error): better message.
+				"invalid operand %s (found %s, want list or struct)",
+				x.Source(), v.Kind())
+			return emptyNode
+		}
+	}
+
+	return node
+}
+
+// Elems returns the elements of a list.
+func (c *OpContext) Elems(v Value) []*Vertex {
+	list := c.list(v)
+	return list.Elems()
+}
+
+// list asserts that v is a list Vertex, recording a type error and returning
+// emptyNode if it is not.
+func (c *OpContext) list(v Value) *Vertex {
+	x, ok := v.(*Vertex)
+	if !ok || !x.IsList() {
+		c.typeError(v, ListKind)
+		return emptyNode
+	}
+	return x
+}
+
+// scalar unwraps v and asserts it is a scalar value, recording a type error
+// if it is not.
+func (c *OpContext) scalar(v Value) Value {
+	v = Unwrap(v)
+	switch v.(type) {
+	case *Null, *Bool, *Num, *String, *Bytes:
+	default:
+		c.typeError(v, ScalarKinds)
+	}
+	return v
+}
+
+// zero is the sentinel returned by numeric accessors after an error.
+var zero = &Num{K: NumKind}
+
+// Num asserts that v is a number, recording a type error (mentioning context
+// as) and returning zero if it is not.
+func (c *OpContext) Num(v Value, as interface{}) *Num {
+	v = Unwrap(v)
+	if isError(v) {
+		return zero
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, NumKind, as)
+		return zero
+	}
+	return x
+}
+
+// Int64 returns v as an int64, recording an error and returning 0 if v is
+// not a number representable as int64.
+func (c *OpContext) Int64(v Value) int64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeError(v, IntKind)
+		return 0
+	}
+	i, err := x.X.Int64()
+	if err != nil {
+		c.AddErrf("number is not an int64: %v", err)
+		return 0
+	}
+	return i
+}
+
+// uint64 returns v as a uint64, recording an error (mentioning context as)
+// and returning 0 if v is negative or does not fit.
+func (c *OpContext) uint64(v Value, as string) uint64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, IntKind, as)
+		return 0
+	}
+	if x.X.Negative {
+		// TODO: improve message
+		c.AddErrf("cannot convert negative number to uint64")
+		return 0
+	}
+	if !x.X.Coeff.IsUint64() {
+		// TODO: improve message
+		c.AddErrf("cannot convert number %s to uint64", x.X)
+		return 0
+	}
+	return x.X.Coeff.Uint64()
+}
+
+// BoolValue returns the boolean value of v, recording an error and returning
+// false if v is not a boolean.
+func (c *OpContext) BoolValue(v Value) bool {
+	return c.boolValue(v, nil)
+}
+
+// boolValue is like BoolValue, mentioning context as in any error.
+func (c *OpContext) boolValue(v Value, as interface{}) bool {
+	v = Unwrap(v)
+	if isError(v) {
+		return false
+	}
+	x, ok := v.(*Bool)
+	if !ok {
+		c.typeErrorAs(v, BoolKind, as)
+		return false
+	}
+	return x.B
+}
+
+// StringValue returns the string value of v, recording an error and
+// returning "" if v is not a string.
+func (c *OpContext) StringValue(v Value) string {
+	return c.stringValue(v, nil)
+}
+
+// ToBytes returns the bytes value of a scalar value.
+func (c *OpContext) ToBytes(v Value) []byte {
+	if x, ok := v.(*Bytes); ok {
+		return x.B
+	}
+	return []byte(c.ToString(v))
+}
+
+// ToString returns the string value of a scalar value.
+func (c *OpContext) ToString(v Value) string {
+	return c.toStringValue(v, StringKind|NumKind|BytesKind|BoolKind, nil)
+
+}
+
+// stringValue is like StringValue, mentioning context as in any error.
+func (c *OpContext) stringValue(v Value, as interface{}) string {
+	return c.toStringValue(v, StringKind, as)
+}
+
+// toStringValue converts v to a string, accepting only values whose kind is
+// in k. It records an error (mentioning context as, if non-nil) and returns
+// "" on failure.
+func (c *OpContext) toStringValue(v Value, k Kind, as interface{}) string {
+	v = Unwrap(v)
+	if isError(v) {
+		return ""
+	}
+	if v.Kind()&k == 0 {
+		if as == nil {
+			c.typeError(v, k)
+		} else {
+			c.typeErrorAs(v, k, as)
+		}
+		return ""
+	}
+	switch x := v.(type) {
+	case *String:
+		return x.Str
+
+	case *Bytes:
+		return bytesToString(x.B)
+
+	case *Num:
+		return x.X.String()
+
+	case *Bool:
+		if x.B {
+			return "true"
+		}
+		return "false"
+
+	default:
+		c.addErrf(IncompleteError, c.pos(),
+			"non-concrete value %s (type %s)", v, v.Kind())
+	}
+	return ""
+}
+
+// bytesToString converts b to a string, replacing invalid UTF-8 sequences.
+func bytesToString(b []byte) string {
+	b, _ = unicode.UTF8.NewDecoder().Bytes(b)
+	return string(b)
+}
+
+// bytesValue asserts that v is a bytes value, recording a type error
+// (mentioning context as) and returning nil if it is not.
+func (c *OpContext) bytesValue(v Value, as interface{}) []byte {
+	v = Unwrap(v)
+	if isError(v) {
+		return nil
+	}
+	x, ok := v.(*Bytes)
+	if !ok {
+		c.typeErrorAs(v, BytesKind, as)
+		return nil
+	}
+	return x.B
+}
+
+// matchNone matches only the empty string; it is used as the sentinel result
+// when a valid regular expression is unavailable.
+var matchNone = regexp.MustCompile("^$")
+
+// regexp returns the compiled regular expression for v, compiling and
+// caching it on the value on first use. It records an error and returns
+// matchNone if v is not a string or bytes, or does not compile.
+func (c *OpContext) regexp(v Value) *regexp.Regexp {
+	v = Unwrap(v)
+	if isError(v) {
+		return matchNone
+	}
+	switch x := v.(type) {
+	case *String:
+		if x.RE != nil {
+			return x.RE
+		}
+		// TODO: synchronization
+		p, err := regexp.Compile(x.Str)
+		if err != nil {
+			// FatalError? How to cache error
+			c.AddErrf("invalid regexp: %s", err)
+			x.RE = matchNone
+		} else {
+			x.RE = p
+		}
+		return x.RE
+
+	case *Bytes:
+		if x.RE != nil {
+			return x.RE
+		}
+		// TODO: synchronization
+		p, err := regexp.Compile(string(x.B))
+		if err != nil {
+			c.AddErrf("invalid regexp: %s", err)
+			x.RE = matchNone
+		} else {
+			x.RE = p
+		}
+		return x.RE
+
+	default:
+		c.typeError(v, StringKind|BytesKind)
+		return matchNone
+	}
+}
+
+// newNum creates a new number of the given kind. It reports an error value
+// instead if any error occurred.
+func (c *OpContext) newNum(d *apd.Decimal, k Kind, sources ...Node) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Num{Src: c.src, X: *d, K: k}
+}
+
+// NewInt64 creates a new integer value for n, or an error value if an error
+// was already recorded.
+func (c *OpContext) NewInt64(n int64, sources ...Node) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	d := apd.New(n, 0)
+	return &Num{Src: c.src, X: *d, K: IntKind}
+}
+
+// NewString creates a new string value for s, or an error value if an error
+// was already recorded.
+func (c *OpContext) NewString(s string) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &String{Src: c.src, Str: s}
+}
+
+// newBytes creates a new bytes value for b, or an error value if an error
+// was already recorded.
+func (c *OpContext) newBytes(b []byte) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Bytes{Src: c.src, B: b}
+}
+
+// newBool creates a new boolean value for b, or an error value if an error
+// was already recorded.
+func (c *OpContext) newBool(b bool) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Bool{Src: c.src, B: b}
+}
+
+// newList creates an empty list Vertex with the given parent.
+func (c *OpContext) newList(src ast.Node, parent *Vertex) *Vertex {
+	return &Vertex{Parent: parent, BaseValue: &ListMarker{}}
+}
+
+// Str reports a debug string of x.
+func (c *OpContext) Str(x Node) string {
+	if c.Format == nil {
+		return fmt.Sprintf("%T", x)
+	}
+	return c.Format(x)
+}
+
+// NewList returns a new finalized list for the given values.
+func (c *OpContext) NewList(values ...Value) *Vertex {
+	// TODO: consider making this a literal list instead.
+	list := &ListLit{}
+	v := &Vertex{
+		Conjuncts: []Conjunct{{Env: nil, x: list}},
+	}
+
+	for _, x := range values {
+		list.Elems = append(list.Elems, x)
+	}
+	c.Unify(v, Finalized)
+	return v
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/decimal.go b/vendor/cuelang.org/go/internal/core/adt/decimal.go
new file mode 100644
index 0000000000..e7eba38566
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/decimal.go
@@ -0,0 +1,131 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "math/big"
+
+ "github.com/cockroachdb/apd/v2"
+)
+
+// apdCtx is the decimal arithmetic context used for all numeric operations
+// in this package. It uses the base context with a precision of 24.
+var apdCtx apd.Context
+
+func init() {
+	apdCtx = apd.BaseContext
+	apdCtx.Precision = 24
+}
+
+// Impl returns the underlying apd.Decimal of n.
+func (n *Num) Impl() *apd.Decimal {
+	return &n.X
+}
+
+// Negative reports whether n is negative.
+func (n *Num) Negative() bool {
+	return n.X.Negative
+}
+
+// Cmp compares a and b, returning a negative, zero, or positive result.
+func (a *Num) Cmp(b *Num) int {
+	return a.X.Cmp(&b.X)
+}
+
+// Add returns the sum of a and b, or an error value.
+func (c *OpContext) Add(a, b *Num) Value {
+	return numOp(c, apdCtx.Add, a, b)
+}
+
+// Sub returns the difference of a and b, or an error value.
+func (c *OpContext) Sub(a, b *Num) Value {
+	return numOp(c, apdCtx.Sub, a, b)
+}
+
+// Mul returns the product of a and b, or an error value.
+func (c *OpContext) Mul(a, b *Num) Value {
+	return numOp(c, apdCtx.Mul, a, b)
+}
+
+// Quo returns the quotient of a and b, or an error value. The result is
+// always of float kind.
+func (c *OpContext) Quo(a, b *Num) Value {
+	v := numOp(c, apdCtx.Quo, a, b)
+	if n, ok := v.(*Num); ok {
+		n.K = FloatKind
+	}
+	return v
+}
+
+// Pow returns a raised to the power b, or an error value.
+func (c *OpContext) Pow(a, b *Num) Value {
+	return numOp(c, apdCtx.Pow, a, b)
+}
+
+// numFunc is the signature of an apd.Context arithmetic operation.
+type numFunc func(z, x, y *apd.Decimal) (apd.Condition, error)
+
+// numOp applies fn to x and y and wraps the result in a Value, converting
+// arithmetic failures and division by zero into error values. The result
+// kind is the intersection of the operand kinds, defaulting to float when
+// the kinds do not overlap.
+func numOp(c *OpContext, fn numFunc, x, y *Num) Value {
+	var d apd.Decimal
+
+	cond, err := fn(&d, &x.X, &y.X)
+
+	if err != nil {
+		return c.NewErrf("failed arithmetic: %v", err)
+	}
+
+	if cond.DivisionByZero() {
+		return c.NewErrf("division by zero")
+	}
+
+	k := x.Kind() & y.Kind()
+	if k == 0 {
+		k = FloatKind
+	}
+	return c.newNum(&d, k)
+}
+
+// IntDiv implements the div operation on integers, using big.Int.Div.
+func (c *OpContext) IntDiv(a, b *Num) Value {
+	return intDivOp(c, (*big.Int).Div, a, b)
+}
+
+// IntMod implements the mod operation on integers, using big.Int.Mod.
+func (c *OpContext) IntMod(a, b *Num) Value {
+	return intDivOp(c, (*big.Int).Mod, a, b)
+}
+
+// IntQuo implements the quo operation on integers, using big.Int.Quo.
+func (c *OpContext) IntQuo(a, b *Num) Value {
+	return intDivOp(c, (*big.Int).Quo, a, b)
+}
+
+// IntRem implements the rem operation on integers, using big.Int.Rem.
+func (c *OpContext) IntRem(a, b *Num) Value {
+	return intDivOp(c, (*big.Int).Rem, a, b)
+}
+
+// intFunc is the signature of a big.Int integer division operation.
+type intFunc func(z, x, y *big.Int) *big.Int
+
+// intDivOp rounds a and b to integral values, applies fn to the magnitudes
+// of their coefficients, and normalizes the sign of the result. Division by
+// zero is reported as an error value.
+func intDivOp(c *OpContext, fn intFunc, a, b *Num) Value {
+	if b.X.IsZero() {
+		return c.NewErrf("division by zero")
+	}
+
+	// Operate on non-negative coefficients of the rounded operands.
+	var x, y apd.Decimal
+	_, _ = apdCtx.RoundToIntegralValue(&x, &a.X)
+	if x.Negative {
+		x.Coeff.Neg(&x.Coeff)
+	}
+	_, _ = apdCtx.RoundToIntegralValue(&y, &b.X)
+	if y.Negative {
+		y.Coeff.Neg(&y.Coeff)
+	}
+
+	var d apd.Decimal
+
+	fn(&d.Coeff, &x.Coeff, &y.Coeff)
+
+	// Keep the coefficient non-negative, tracking the sign separately.
+	if d.Coeff.Sign() < 0 {
+		d.Coeff.Neg(&d.Coeff)
+		d.Negative = true
+	}
+
+	return c.newNum(&d, IntKind)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/default.go b/vendor/cuelang.org/go/internal/core/adt/default.go
new file mode 100644
index 0000000000..6e6f1b19f4
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/default.go
@@ -0,0 +1,132 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// Default returns the default value or itself if there is no default.
+func Default(v Value) Value {
+ switch x := v.(type) {
+ case *Vertex:
+ return x.Default()
+ case *Disjunction:
+ return x.Default()
+ default:
+ return v
+ }
+}
+
+func (d *Disjunction) Default() Value {
+ switch d.NumDefaults {
+ case 0:
+ return d
+ case 1:
+ return d.Values[0]
+ default:
+ return &Disjunction{
+ Src: d.Src,
+ Values: d.Values[:d.NumDefaults],
+ NumDefaults: 0,
+ }
+ }
+}
+
+// Default returns the default value or itself if there is no default.
+//
+// It also closes a list, representing its default value.
+func (v *Vertex) Default() *Vertex {
+	switch d := v.BaseValue.(type) {
+	default:
+		return v
+
+	case *Disjunction:
+		var w *Vertex
+
+		switch d.NumDefaults {
+		case 0:
+			return v
+		case 1:
+			w = d.Values[0].Default()
+		default:
+			// Multiple defaults: narrow the disjunction to just the default
+			// values in a copy of v.
+			x := *v
+			x.state = nil
+			x.BaseValue = &Disjunction{
+				Src:         d.Src,
+				Values:      d.Values[:d.NumDefaults],
+				NumDefaults: 0,
+			}
+			w = &x
+			w.Conjuncts = nil
+		}
+
+		if w.Conjuncts == nil {
+			for _, c := range v.Conjuncts {
+				// TODO: preserve field information.
+				expr, _ := stripNonDefaults(c.Elem())
+				w.Conjuncts = append(w.Conjuncts, MakeRootConjunct(c.Env, expr))
+			}
+		}
+		return w
+
+	case *ListMarker:
+		// Close the list: the default of an open list is the closed list.
+		m := *d
+		m.IsOpen = false
+
+		w := *v
+		w.BaseValue = &m
+		w.state = nil
+		return &w
+	}
+}
+
+// stripNonDefaults rewrites elem so that any disjunction expression within
+// it keeps only its default branches, reporting whether anything was
+// stripped. Non-expression elements and expressions without defaults are
+// returned unchanged.
+//
+// TODO: this should go: record preexpanded disjunctions in Vertex.
+func stripNonDefaults(elem Elem) (r Elem, stripped bool) {
+	expr, ok := elem.(Expr)
+	if !ok {
+		return elem, false
+	}
+	switch x := expr.(type) {
+	case *DisjunctionExpr:
+		if !x.HasDefaults {
+			return x, false
+		}
+		d := *x
+		d.Values = []Disjunct{}
+		for _, v := range x.Values {
+			if v.Default {
+				d.Values = append(d.Values, v)
+			}
+		}
+		// A single remaining default collapses to its value.
+		if len(d.Values) == 1 {
+			return d.Values[0].Val, true
+		}
+		return &d, true
+
+	case *BinaryExpr:
+		// Only descend into conjunctions; other operators are left as is.
+		if x.Op != AndOp {
+			return x, false
+		}
+		a, sa := stripNonDefaults(x.X)
+		b, sb := stripNonDefaults(x.Y)
+		if sa || sb {
+			bin := *x
+			bin.X = a.(Expr)
+			bin.Y = b.(Expr)
+			return &bin, true
+		}
+		return x, false
+
+	default:
+		return x, false
+	}
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct.go b/vendor/cuelang.org/go/internal/core/adt/disjunct.go
new file mode 100644
index 0000000000..8f2074a40a
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/disjunct.go
@@ -0,0 +1,588 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// Nodes may not reenter a disjunction.
+//
+// Copy one layer deep; throw away items on failure.
+
+// DISJUNCTION ALGORITHM
+//
+// The basic concept of the algorithm is to use backtracking to find valid
+// disjunctions. The algorithm can stop if two matching disjuncts are found
+// where one does not subsume the other.
+//
+// At a later point, we can introduce a filter step to filter out possible
+// disjuncts based on, say, discriminator fields or field exclusivity (oneOf
+// fields in Protobuf).
+//
+// To understand the details of the algorithm, it is important to understand
+// some properties of disjunction.
+//
+//
+// EVALUATION OF A DISJUNCTION IS SELF CONTAINED
+//
+// In other words, fields outside of a disjunction cannot bind to values within
+// a disjunction whilst evaluating that disjunction. This allows the computation
+// of disjunctions to be isolated from side effects.
+//
+// The intuition behind this is as follows: as a disjunction is not a concrete
+// value, it is not possible to lookup a field within a disjunction if it has
+// not yet been evaluated. So if a reference within a disjunction that is needed
+// to disambiguate that disjunction refers to a field outside the scope of the
+// disjunction which, in turn, refers to a field within the disjunction, this
+// results in a cycle error. We achieve this by not removing the cycle marker of
+// the Vertex of the disjunction until the disjunction is resolved.
+//
+// Note that the following disjunct is still allowed:
+//
+// a: 1
+// b: a
+//
+// Even though `a` refers to the root of the disjunction, it does not _select
+// into_ the disjunction. Implementation-wise, it also doesn't have to, as the
+// respective vertex is available within the Environment. Referencing a node
+// outside the disjunction that in turn selects the disjunction root, however,
+// will result in a detected cycle.
+//
+// As usual, cycle detection should be interpreted as marked incomplete, so that
+// the referring node will not be fixed to an error prematurely.
+//
+//
+// SUBSUMPTION OF AMBIGUOUS DISJUNCTS
+//
+// A disjunction can be evaluated to a concrete value if only one disjunct
+// remains. Aside from disambiguating through unification failure, disjuncts
+// may also be disambiguated by taking the least specific of two disjuncts.
+// For instance, if a subsumes b, then the result of disjunction may be a.
+//
+// NEW ALGORITHM NO LONGER VERIFIES SUBSUMPTION. SUBSUMPTION IS INHERENTLY
+// IMPRECISE (DUE TO BULK OPTIONAL FIELDS). OTHER THAN THAT, FOR SCALAR VALUES
+// IT JUST MEANS THERE IS AMBIGUITY, AND FOR STRUCTS IT CAN LEAD TO STRANGE
+// CONSEQUENCES.
+//
+// USE EQUALITY INSTEAD:
+// - Undefined == error for optional fields.
+// - So only need to check exact labels for vertices.
+
+type envDisjunct struct {
+ env *Environment
+ cloneID CloseInfo
+ expr *DisjunctionExpr
+ value *Disjunction
+ hasDefaults bool
+
+ // These are used for book keeping, tracking whether any of the
+ // disjuncts marked with a default marker remains after unification.
+ // If no default is used, all other elements are treated as "maybeDefault".
+ // Otherwise, elements are treated as is.
+ parentDefaultUsed bool
+ childDefaultUsed bool
+}
+
+func (n *nodeContext) addDisjunction(env *Environment, x *DisjunctionExpr, cloneID CloseInfo) {
+
+ // TODO: precompute
+ numDefaults := 0
+ for _, v := range x.Values {
+ isDef := v.Default // || n.hasDefaults(env, v.Val)
+ if isDef {
+ numDefaults++
+ }
+ }
+
+ n.disjunctions = append(n.disjunctions,
+ envDisjunct{env, cloneID, x, nil, numDefaults > 0, false, false})
+}
+
+func (n *nodeContext) addDisjunctionValue(env *Environment, x *Disjunction, cloneID CloseInfo) {
+ n.disjunctions = append(n.disjunctions,
+ envDisjunct{env, cloneID, nil, x, x.HasDefaults, false, false})
+
+}
+
+func (n *nodeContext) expandDisjuncts(
+ state VertexStatus,
+ parent *nodeContext,
+ parentMode defaultMode, // default mode of this disjunct
+ recursive, last bool) {
+
+ n.ctx.stats.DisjunctCount++
+
+ node := n.node
+ defer func() {
+ n.node = node
+ }()
+
+ for n.expandOne() {
+ }
+
+	// save node to snapshot in nodeContext
+ // save nodeContext.
+
+ if recursive || len(n.disjunctions) > 0 {
+ n.snapshot = clone(*n.node)
+ } else {
+ n.snapshot = *n.node
+ }
+
+ defaultOffset := len(n.usedDefault)
+
+ switch {
+ default: // len(n.disjunctions) == 0
+ m := *n
+ n.postDisjunct(state)
+
+ switch {
+ case n.hasErr():
+ // TODO: consider finalizing the node thusly:
+ // if recursive {
+ // n.node.Finalize(n.ctx)
+ // }
+ x := n.node
+ err, ok := x.BaseValue.(*Bottom)
+ if !ok {
+ err = n.getErr()
+ }
+ if err == nil {
+ // TODO(disjuncts): Is this always correct? Especially for partial
+ // evaluation it is okay for child errors to have incomplete errors.
+ // Perhaps introduce an Err() method.
+ err = x.ChildErrors
+ }
+ if err.IsIncomplete() {
+ break
+ }
+ if err != nil {
+ parent.disjunctErrs = append(parent.disjunctErrs, err)
+ }
+ if recursive {
+ n.free()
+ }
+ return
+ }
+
+ if recursive {
+ *n = m
+ n.result = *n.node // XXX: n.result = snapshotVertex(n.node)?
+ n.node = &n.result
+ n.disjuncts = append(n.disjuncts, n)
+ }
+ if n.node.BaseValue == nil {
+ n.node.BaseValue = n.getValidators()
+ }
+
+ n.usedDefault = append(n.usedDefault, defaultInfo{
+ parentMode: parentMode,
+ nestedMode: parentMode,
+ origMode: parentMode,
+ })
+
+ case len(n.disjunctions) > 0:
+ // Process full disjuncts to ensure that erroneous disjuncts are
+ // eliminated as early as possible.
+ state = Finalized
+
+ n.disjuncts = append(n.disjuncts, n)
+
+ n.refCount++
+ defer n.free()
+
+ for i, d := range n.disjunctions {
+ a := n.disjuncts
+ n.disjuncts = n.buffer[:0]
+ n.buffer = a[:0]
+
+ last := i+1 == len(n.disjunctions)
+ skipNonMonotonicChecks := i+1 < len(n.disjunctions)
+ if skipNonMonotonicChecks {
+ n.ctx.inDisjunct++
+ }
+
+ for _, dn := range a {
+ switch {
+ case d.expr != nil:
+ for _, v := range d.expr.Values {
+ cn := dn.clone()
+ *cn.node = clone(dn.snapshot)
+ cn.node.state = cn
+
+ c := MakeConjunct(d.env, v.Val, d.cloneID)
+ cn.addExprConjunct(c)
+
+ newMode := mode(d.hasDefaults, v.Default)
+
+ cn.expandDisjuncts(state, n, newMode, true, last)
+ }
+
+ case d.value != nil:
+ for i, v := range d.value.Values {
+ cn := dn.clone()
+ *cn.node = clone(dn.snapshot)
+ cn.node.state = cn
+
+ cn.addValueConjunct(d.env, v, d.cloneID)
+
+ newMode := mode(d.hasDefaults, i < d.value.NumDefaults)
+
+ cn.expandDisjuncts(state, n, newMode, true, last)
+ }
+ }
+ }
+
+ if skipNonMonotonicChecks {
+ n.ctx.inDisjunct--
+ }
+
+ if len(n.disjuncts) == 0 {
+ n.makeError()
+ }
+
+ if recursive || i > 0 {
+ for _, x := range a {
+ x.free()
+ }
+ }
+
+ if len(n.disjuncts) == 0 {
+ break
+ }
+ }
+
+ // Annotate disjunctions with whether any of the default disjunctions
+ // was used.
+ for _, d := range n.disjuncts {
+ for i, info := range d.usedDefault[defaultOffset:] {
+ if info.parentMode == isDefault {
+ n.disjunctions[i].parentDefaultUsed = true
+ }
+ if info.origMode == isDefault {
+ n.disjunctions[i].childDefaultUsed = true
+ }
+ }
+ }
+
+ // Combine parent and child default markers, considering that a parent
+ // "notDefault" is treated as "maybeDefault" if none of the disjuncts
+ // marked as default remain.
+ //
+ // NOTE for a parent marked as "notDefault", a child is *never*
+ // considered as default. It may either be "not" or "maybe" default.
+ //
+ // The result for each disjunction is conjoined into a single value.
+ for _, d := range n.disjuncts {
+ m := maybeDefault
+ orig := maybeDefault
+ for i, info := range d.usedDefault[defaultOffset:] {
+ parent := info.parentMode
+
+ used := n.disjunctions[i].parentDefaultUsed
+ childUsed := n.disjunctions[i].childDefaultUsed
+ hasDefaults := n.disjunctions[i].hasDefaults
+
+ orig = combineDefault(orig, info.parentMode)
+ orig = combineDefault(orig, info.nestedMode)
+
+ switch {
+ case childUsed:
+ // One of the children used a default. This is "normal"
+ // mode. This may also happen when we are in
+ // hasDefaults/notUsed mode. Consider
+ //
+ // ("a" | "b") & (*(*"a" | string) | string)
+ //
+ // Here the doubly nested default is called twice, once
+ // for "a" and then for "b", where the second resolves to
+ // not using a default. The first does, however, and on that
+			// basis the "not" default marker cannot be overridden.
+ m = combineDefault(m, info.parentMode)
+ m = combineDefault(m, info.origMode)
+
+ case !hasDefaults, used:
+ m = combineDefault(m, info.parentMode)
+ m = combineDefault(m, info.nestedMode)
+
+ case hasDefaults && !used:
+ Assertf(parent == notDefault, "unexpected default mode")
+ }
+ }
+ d.defaultMode = m
+
+ d.usedDefault = d.usedDefault[:defaultOffset]
+ d.usedDefault = append(d.usedDefault, defaultInfo{
+ parentMode: parentMode,
+ nestedMode: m,
+ origMode: orig,
+ })
+
+ }
+
+ // TODO: this is an old trick that seems no longer necessary for the new
+ // implementation. Keep around until we finalize the semantics for
+ // defaults, though. The recursion of nested defaults is not entirely
+ // proper yet.
+ //
+ // A better approach, that avoids the need for recursion (semantically),
+ // would be to only consider default usage for one level, but then to
+ // also allow a default to be passed if only one value is remaining.
+ // This means that a nested subsumption would first have to be evaluated
+ // in isolation, however, to determine that it is not previous
+ // disjunctions that cause the disambiguation.
+ //
+ // HACK alert: this replaces the hack of the previous algorithm with a
+ // slightly less worse hack: instead of dropping the default info when
+ // the value was scalar before, we drop this information when there is
+ // only one disjunct, while not discarding hard defaults. TODO: a more
+ // principled approach would be to recognize that there is only one
+ // default at a point where this does not break commutativity. if
+ // if len(n.disjuncts) == 1 && n.disjuncts[0].defaultMode != isDefault {
+ // n.disjuncts[0].defaultMode = maybeDefault
+ // }
+ }
+
+ // Compare to root, but add to this one.
+ switch p := parent; {
+ case p != n:
+ p.disjunctErrs = append(p.disjunctErrs, n.disjunctErrs...)
+ n.disjunctErrs = n.disjunctErrs[:0]
+
+ outer:
+ for _, d := range n.disjuncts {
+ for k, v := range p.disjuncts {
+ if !d.done() || !v.done() {
+ break
+ }
+ flags := CheckStructural
+ if last {
+ flags |= IgnoreOptional
+ }
+ if Equal(n.ctx, &v.result, &d.result, flags) {
+ m := maybeDefault
+ for _, u := range d.usedDefault {
+ m = combineDefault(m, u.nestedMode)
+ }
+ if m == isDefault {
+ p.disjuncts[k] = d
+ v.free()
+ } else {
+ d.free()
+ }
+ continue outer
+ }
+ }
+
+ p.disjuncts = append(p.disjuncts, d)
+ }
+
+ n.disjuncts = n.disjuncts[:0]
+ }
+}
+
+func (n *nodeContext) makeError() {
+ code := IncompleteError
+
+ if len(n.disjunctErrs) > 0 {
+ code = EvalError
+ for _, c := range n.disjunctErrs {
+ if c.Code > code {
+ code = c.Code
+ }
+ }
+ }
+
+ b := &Bottom{
+ Code: code,
+ Err: n.disjunctError(),
+ }
+ n.node.SetValue(n.ctx, Finalized, b)
+}
+
+func mode(hasDefault, marked bool) defaultMode {
+ var mode defaultMode
+ switch {
+ case !hasDefault:
+ mode = maybeDefault
+ case marked:
+ mode = isDefault
+ default:
+ mode = notDefault
+ }
+ return mode
+}
+
+// clone makes a shallow copy of a Vertex. The purpose is to create different
+// disjuncts from the same Vertex under computation. This allows the conjuncts
+// of an arc to be reset to a previous position and the reuse of earlier
+// computations.
+//
+// Notes: only Arcs need to be copied recursively. Either the arc is finalized
+// and can be used as is, or Structs is assumed to not yet be computed at the
+// time that a clone is needed and must be nil. Conjuncts no longer needed and
+// can become nil. All other fields can be copied shallowly.
+func clone(v Vertex) Vertex {
+ v.state = nil
+ if a := v.Arcs; len(a) > 0 {
+ v.Arcs = make([]*Vertex, len(a))
+ for i, arc := range a {
+ switch arc.status {
+ case Finalized:
+ v.Arcs[i] = arc
+
+ case 0:
+ a := *arc
+ v.Arcs[i] = &a
+
+ a.Conjuncts = make([]Conjunct, len(arc.Conjuncts))
+ copy(a.Conjuncts, arc.Conjuncts)
+
+ default:
+ a := *arc
+ a.state = arc.state.clone()
+ a.state.node = &a
+ a.state.snapshot = clone(a)
+ v.Arcs[i] = &a
+ }
+ }
+ }
+
+ if a := v.Structs; len(a) > 0 {
+ v.Structs = make([]*StructInfo, len(a))
+ copy(v.Structs, a)
+ }
+
+ return v
+}
+
+// Default rules from spec:
+//
+// U1: (v1, d1) & v2 => (v1&v2, d1&v2)
+// U2: (v1, d1) & (v2, d2) => (v1&v2, d1&d2)
+//
+// D1: (v1, d1) | v2 => (v1|v2, d1)
+// D2: (v1, d1) | (v2, d2) => (v1|v2, d1|d2)
+//
+// M1: *v => (v, v)
+// M2: *(v1, d1) => (v1, d1)
+//
+// NOTE: M2 cannot be *(v1, d1) => (v1, v1), as this has the weird property
+// of making a value less specific. This causes issues, for instance, when
+// trimming.
+//
+// The old implementation does something similar though. It will discard
+// default information after first determining if more than one conjunct
+// has survived.
+//
+// def + maybe -> def
+// not + maybe -> def
+// not + def -> def
+
+type defaultMode int
+
+const (
+ maybeDefault defaultMode = iota
+ isDefault
+ notDefault
+)
+
+// combineDefault combines default modes for unifying conjuncts.
+//
+// Default rules from spec:
+//
+// U1: (v1, d1) & v2 => (v1&v2, d1&v2)
+// U2: (v1, d1) & (v2, d2) => (v1&v2, d1&d2)
+func combineDefault(a, b defaultMode) defaultMode {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// disjunctError returns a compound error for a failed disjunction.
+//
+// TODO(perf): the set of errors is now computed during evaluation. Eventually,
+// this could be done lazily.
+func (n *nodeContext) disjunctError() (errs errors.Error) {
+ ctx := n.ctx
+
+ disjuncts := selectErrors(n.disjunctErrs)
+
+ if disjuncts == nil {
+ errs = ctx.Newf("empty disjunction") // XXX: add space to sort first
+ } else {
+ disjuncts = errors.Sanitize(disjuncts)
+ k := len(errors.Errors(disjuncts))
+ // prefix '-' to sort to top
+ errs = ctx.Newf("%d errors in empty disjunction:", k)
+ }
+
+ errs = errors.Append(errs, disjuncts)
+
+ return errs
+}
+
+func selectErrors(a []*Bottom) (errs errors.Error) {
+ // return all errors if less than a certain number.
+ if len(a) <= 2 {
+ for _, b := range a {
+ errs = errors.Append(errs, b.Err)
+
+ }
+ return errs
+ }
+
+ // First select only relevant errors.
+ isIncomplete := false
+ k := 0
+ for _, b := range a {
+ if !isIncomplete && b.Code >= IncompleteError {
+ k = 0
+ isIncomplete = true
+ }
+ a[k] = b
+ k++
+ }
+ a = a[:k]
+
+ // filter errors
+ positions := map[token.Pos]bool{}
+
+ add := func(b *Bottom, p token.Pos) bool {
+ if positions[p] {
+ return false
+ }
+ positions[p] = true
+ errs = errors.Append(errs, b.Err)
+ return true
+ }
+
+ for _, b := range a {
+ // TODO: Should we also distinguish by message type?
+ if add(b, b.Err.Position()) {
+ continue
+ }
+ for _, p := range b.Err.InputPositions() {
+ if add(b, p) {
+ break
+ }
+ }
+ }
+
+ return errs
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/doc.go b/vendor/cuelang.org/go/internal/core/adt/doc.go
new file mode 100644
index 0000000000..26c978e2f3
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/doc.go
@@ -0,0 +1,78 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adt represents partially and fully evaluated CUE types.
+//
+// This package introduces several categories of types that indicate some set of
+// values that may be used in a certain situation. Concrete types may belong to
+// multiple categories.
+//
+//
+// Abstract Types
+//
+// The following types describe a place where a value may be used:
+//
+// Decl a value that can be used as a StructLit element.
+// Elem a value that can be used as a ListLit element.
+// Expr represents an Expr in the CUE grammar.
+// Value a fully evaluated value that has no references (except for
+// children in composite values).
+// Node any of the above values.
+//
+// The following types categorize nodes by function:
+//
+// Resolver a reference to position in the result tree.
+// Evaluator evaluates to 1 value.
+// Yielder evaluates to 0 or more values.
+// Validator validates another value.
+//
+//
+// Reference resolution algorithm
+//
+// A Resolver is resolved within the context of an Environment. In CUE, a
+// reference is evaluated by substituting it with a copy of the value to which
+// it refers. If the copied value itself contains references we can distinguish
+// two different cases. References that refer to values within the copied
+// reference (not regarding selectors) will henceforth point to the copied node.
+// References that point to outside the referenced value will keep referring to
+// their original value.
+//
+// a: b: {
+// c: int
+// d: c
+// e: f
+// }
+// f: 4
+// g: a.b { // d.c points to inside the referred value, e.f, not.
+// c: 3
+// }
+//
+// The implementation doesn't actually copy referred values, but rather resolves
+// references with the aid of an Environment. During compile time, each
+// reference is associated with the label and a number indicating in which
+// parent scope (offset from the current) this label needs to be looked up. An
+// Environment keeps track of the point at which a value was referenced,
+// providing enough information to look up the labeled value. This Environment
+// is identical for all references within a field's conjunct. Often, an
+// Environment can even be shared among conjuncts.
+//
+//
+// Values
+//
+// Values are fully evaluated expressions. As this means that all references
+// will have been eliminated, Values are fully defined without the need for an
+// Environment. Additionally, Values represent a fully evaluated form, stripped
+// of any comprehensions, optional fields or embeddings.
+//
+package adt
diff --git a/vendor/cuelang.org/go/internal/core/adt/equality.go b/vendor/cuelang.org/go/internal/core/adt/equality.go
new file mode 100644
index 0000000000..cb1d338137
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/equality.go
@@ -0,0 +1,192 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+type Flag uint16
+
+const (
+ // IgnoreOptional allows optional information to be ignored. This only
+ // applies when CheckStructural is given.
+ IgnoreOptional Flag = 1 << iota
+
+ // CheckStructural indicates that closedness information should be
+ // considered for equality. Equal may return false even when values are
+ // equal.
+ CheckStructural Flag = 1 << iota
+)
+
+func Equal(ctx *OpContext, v, w Value, flags Flag) bool {
+ if x, ok := v.(*Vertex); ok {
+ return equalVertex(ctx, x, w, flags)
+ }
+ if y, ok := w.(*Vertex); ok {
+ return equalVertex(ctx, y, v, flags)
+ }
+ return equalTerminal(ctx, v, w, flags)
+}
+
+func equalVertex(ctx *OpContext, x *Vertex, v Value, flags Flag) bool {
+ y, ok := v.(*Vertex)
+ if !ok {
+ return false
+ }
+ if x == y {
+ return true
+ }
+ xk := x.Kind()
+ yk := y.Kind()
+
+ if xk != yk {
+ return false
+ }
+
+ if len(x.Arcs) != len(y.Arcs) {
+ return false
+ }
+
+ // TODO: this really should be subsumption.
+ if flags != 0 {
+ if x.IsClosedStruct() != y.IsClosedStruct() {
+ return false
+ }
+ if !equalClosed(ctx, x, y, flags) {
+ return false
+ }
+ }
+
+loop1:
+ for _, a := range x.Arcs {
+ for _, b := range y.Arcs {
+ if a.Label == b.Label {
+ if !Equal(ctx, a, b, flags) {
+ return false
+ }
+ continue loop1
+ }
+ }
+ return false
+ }
+
+ // We do not need to do the following check, because of the pigeon-hole principle.
+ // loop2:
+ // for _, b := range y.Arcs {
+ // for _, a := range x.Arcs {
+ // if a.Label == b.Label {
+ // continue loop2
+ // }
+ // }
+ // return false
+ // }
+
+ v, ok1 := x.BaseValue.(Value)
+ w, ok2 := y.BaseValue.(Value)
+ if !ok1 && !ok2 {
+ return true // both are struct or list.
+ }
+
+ return equalTerminal(ctx, v, w, flags)
+}
+
+// equalClosed tests if x and y have the same set of close information.
+// TODO: the following refinements are possible:
+// - unify optional fields and equate the optional fields
+// - do the same for pattern constraints, where the pattern constraints
+// are collated by pattern equality.
+// - a further refinement would collate patterns by ranges.
+//
+// For all these refinements it would be necessary to have well-working
+// structure sharing so as to not repeatedly recompute optional arcs.
+func equalClosed(ctx *OpContext, x, y *Vertex, flags Flag) bool {
+ return verifyStructs(x, y, flags) && verifyStructs(y, x, flags)
+}
+
+func verifyStructs(x, y *Vertex, flags Flag) bool {
+outer:
+ for _, s := range x.Structs {
+ if (flags&IgnoreOptional != 0) && !s.StructLit.HasOptional() {
+ continue
+ }
+ if s.closeInfo == nil || s.closeInfo.span&DefinitionSpan == 0 {
+ if !s.StructLit.HasOptional() {
+ continue
+ }
+ }
+ for _, t := range y.Structs {
+ if s.StructLit == t.StructLit {
+ continue outer
+ }
+ }
+ return false
+ }
+ return true
+}
+
+func equalTerminal(ctx *OpContext, v, w Value, flags Flag) bool {
+ if v == w {
+ return true
+ }
+
+ switch x := v.(type) {
+ case *Num, *String, *Bool, *Bytes, *Null:
+ if b, ok := BinOp(ctx, EqualOp, v, w).(*Bool); ok {
+ return b.B
+ }
+ return false
+
+ // TODO: for the remainder we are dealing with non-concrete values, so we
+ // could also just not bother.
+
+ case *BoundValue:
+ if y, ok := w.(*BoundValue); ok {
+ return x.Op == y.Op && Equal(ctx, x.Value, y.Value, flags)
+ }
+
+ case *BasicType:
+ if y, ok := w.(*BasicType); ok {
+ return x.K == y.K
+ }
+
+ case *Conjunction:
+ y, ok := w.(*Conjunction)
+ if !ok || len(x.Values) != len(y.Values) {
+ return false
+ }
+ // always ordered the same
+ for i, xe := range x.Values {
+ if !Equal(ctx, xe, y.Values[i], flags) {
+ return false
+ }
+ }
+ return true
+
+ case *Disjunction:
+ // The best way to compute this is with subsumption, but even that won't
+ // be too accurate. Assume structural equivalence for now.
+ y, ok := w.(*Disjunction)
+ if !ok || len(x.Values) != len(y.Values) {
+ return false
+ }
+ for i, xe := range x.Values {
+ if !Equal(ctx, xe, y.Values[i], flags) {
+ return false
+ }
+ }
+ return true
+
+ case *BuiltinValidator:
+ }
+
+ return false
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/errors.go b/vendor/cuelang.org/go/internal/core/adt/errors.go
new file mode 100644
index 0000000000..d5f6cfc17a
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/errors.go
@@ -0,0 +1,324 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// This file contains error encodings.
+//
+//
+// *Bottom:
+// - an adt.Value
+// - always belongs to a single vertex.
+// - does NOT implement error
+// - marks error code used for control flow
+//
+// errors.Error
+// - CUE default error
+// - implements error
+// - tracks error locations
+// - has error message details
+// - supports multiple errors
+//
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ cueformat "cuelang.org/go/cue/format"
+ "cuelang.org/go/cue/token"
+)
+
+// ErrorCode indicates the type of error. The type of error may influence
+// control flow. No other aspects of an error may influence control flow.
+type ErrorCode int
+
+const (
+ // An EvalError is a fatal evaluation error.
+ EvalError ErrorCode = iota
+
+ // A UserError is a fatal error originating from the user.
+ UserError
+
+ // StructuralCycleError means a structural cycle was found. Structural
+ // cycles are permanent errors, but they are not passed up recursively,
+ // as a unification of a value with a structural cycle with one that
+ // doesn't may still give a useful result.
+ StructuralCycleError
+
+ // IncompleteError means an evaluation could not complete because of
+ // insufficient information that may still be added later.
+ IncompleteError
+
+ // A CycleError indicates a reference error. It is considered to be
+ // an incomplete error, as reference errors may be broken by providing
+ // a concrete value.
+ CycleError
+)
+
+func (c ErrorCode) String() string {
+ switch c {
+ case EvalError:
+ return "eval"
+ case UserError:
+ return "user"
+ case StructuralCycleError:
+ return "structural cycle"
+ case IncompleteError:
+ return "incomplete"
+ case CycleError:
+ return "cycle"
+ }
+ return "unknown"
+}
+
+// Bottom represents an error or bottom symbol.
+//
+// Although a Bottom node holds control data, it should not be created until the
+// control information already resulted in an error.
+type Bottom struct {
+ Src ast.Node
+ Err errors.Error
+
+ Code ErrorCode
+ HasRecursive bool
+ ChildError bool // Err is the error of the child
+ NotExists bool // This error originated from a failed lookup.
+ // Value holds the computed value so far in case
+ Value Value
+}
+
+func (x *Bottom) Source() ast.Node { return x.Src }
+func (x *Bottom) Kind() Kind { return BottomKind }
+func (x *Bottom) Specialize(k Kind) Value { return x } // XXX remove
+
+func (b *Bottom) IsIncomplete() bool {
+ if b == nil {
+ return false
+ }
+ return b.Code == IncompleteError || b.Code == CycleError
+}
+
+// isLiteralBottom reports whether x is an error originating from a user.
+func isLiteralBottom(x Expr) bool {
+ b, ok := x.(*Bottom)
+ return ok && b.Code == UserError
+}
+
+// isError reports whether v is an error or nil.
+func isError(v Value) bool {
+ if v == nil {
+ return true
+ }
+ _, ok := v.(*Bottom)
+ return ok
+}
+
+// isIncomplete reports whether v is associated with an incomplete error.
+func isIncomplete(v *Vertex) bool {
+ if v == nil {
+ return true
+ }
+ if b, ok := v.BaseValue.(*Bottom); ok {
+ return b.IsIncomplete()
+ }
+ return false
+}
+
+// AddChildError updates x to record an error that occurred in one of
+// its descendent arcs. The resulting error will record the worst error code of
+// the current error or recursive error.
+//
+// If x is not already an error, the value is recorded in the error for
+// reference.
+//
+func (v *Vertex) AddChildError(recursive *Bottom) {
+ v.ChildErrors = CombineErrors(nil, v.ChildErrors, recursive)
+ if recursive.IsIncomplete() {
+ return
+ }
+ x := v.BaseValue
+ err, _ := x.(*Bottom)
+ if err == nil {
+ v.BaseValue = &Bottom{
+ Code: recursive.Code,
+ Value: v,
+ HasRecursive: true,
+ ChildError: true,
+ Err: recursive.Err,
+ }
+ return
+ }
+
+ err.HasRecursive = true
+ if err.Code > recursive.Code {
+ err.Code = recursive.Code
+ }
+
+ v.BaseValue = err
+}
+
+// CombineErrors combines two errors that originate at the same Vertex.
+func CombineErrors(src ast.Node, x, y Value) *Bottom {
+ a, _ := Unwrap(x).(*Bottom)
+ b, _ := Unwrap(y).(*Bottom)
+
+ if a == b && isCyclePlaceholder(a) {
+ return a
+ }
+ switch {
+ case a != nil && b != nil:
+ case a != nil:
+ return a
+ case b != nil:
+ return b
+ default:
+ return nil
+ }
+
+ if a.Code != b.Code {
+ if a.Code > b.Code {
+ a, b = b, a
+ }
+
+ if b.Code >= IncompleteError {
+ return a
+ }
+ }
+
+ return &Bottom{
+ Src: src,
+ Err: errors.Append(a.Err, b.Err),
+ Code: a.Code,
+ }
+}
+
+// A ValueError is returned as a result of evaluating a value.
+type ValueError struct {
+ r Runtime
+ v *Vertex
+ pos token.Pos
+ auxpos []token.Pos
+ errors.Message
+}
+
+func (v *ValueError) AddPosition(n Node) {
+ if n == nil {
+ return
+ }
+ if p := pos(n); p != token.NoPos {
+ for _, q := range v.auxpos {
+ if p == q {
+ return
+ }
+ }
+ v.auxpos = append(v.auxpos, p)
+ }
+}
+
+func (v *ValueError) AddClosedPositions(c CloseInfo) {
+ for s := c.closeInfo; s != nil; s = s.parent {
+ if loc := s.location; loc != nil {
+ v.AddPosition(loc)
+ }
+ }
+}
+
+func (c *OpContext) errNode() *Vertex {
+ return c.vertex
+}
+
+// MarkPositions marks the current position stack.
+func (c *OpContext) MarkPositions() int {
+ return len(c.positions)
+}
+
+// ReleasePositions sets the position state to one from a call to MarkPositions.
+func (c *OpContext) ReleasePositions(p int) {
+ c.positions = c.positions[:p]
+}
+
+func (c *OpContext) AddPosition(n Node) {
+ if n != nil {
+ c.positions = append(c.positions, n)
+ }
+}
+
+func (c *OpContext) Newf(format string, args ...interface{}) *ValueError {
+ return c.NewPosf(c.pos(), format, args...)
+}
+
+func appendNodePositions(a []token.Pos, n Node) []token.Pos {
+ if p := pos(n); p != token.NoPos {
+ a = append(a, p)
+ }
+ if v, ok := n.(*Vertex); ok {
+ for _, c := range v.Conjuncts {
+ a = appendNodePositions(a, c.Elem())
+ }
+ }
+ return a
+}
+
+func (c *OpContext) NewPosf(p token.Pos, format string, args ...interface{}) *ValueError {
+ var a []token.Pos
+ if len(c.positions) > 0 {
+ a = make([]token.Pos, 0, len(c.positions))
+ for _, n := range c.positions {
+ a = appendNodePositions(a, n)
+ }
+ }
+ for i, arg := range args {
+ switch x := arg.(type) {
+ case Node:
+ a = appendNodePositions(a, x)
+ args[i] = c.Str(x)
+ case ast.Node:
+ b, _ := cueformat.Node(x)
+ if p := x.Pos(); p != token.NoPos {
+ a = append(a, p)
+ }
+ args[i] = string(b)
+ case Feature:
+ args[i] = x.SelectorString(c.Runtime)
+ }
+ }
+ return &ValueError{
+ r: c.Runtime,
+ v: c.errNode(),
+ pos: p,
+ auxpos: a,
+ Message: errors.NewMessage(format, args),
+ }
+}
+
+func (e *ValueError) Error() string {
+ return errors.String(e)
+}
+
+func (e *ValueError) Position() token.Pos {
+ return e.pos
+}
+
+func (e *ValueError) InputPositions() (a []token.Pos) {
+ return e.auxpos
+}
+
+func (e *ValueError) Path() (a []string) {
+ if e.v == nil {
+ return nil
+ }
+ for _, f := range appendPath(nil, e.v) {
+ a = append(a, f.SelectorString(e.r))
+ }
+ return a
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/eval.go b/vendor/cuelang.org/go/internal/core/adt/eval.go
new file mode 100644
index 0000000000..37e8cd9362
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/eval.go
@@ -0,0 +1,2173 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package eval contains the high level CUE evaluation strategy.
+//
+// CUE allows for a significant amount of freedom in order of evaluation due to
+// the commutativity of the unification operation. This package implements one
+// of the possible strategies.
+package adt
+
+// TODO:
+// - result should be nodeContext: this allows optionals info to be extracted
+// and computed.
+//
+
+import (
+ "fmt"
+ "html/template"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO
+//
+// - Reuse work from previous cycles. For instance, if we can guarantee that a
+// value is always correct for partial results, we can just process the arcs
+// going from Partial to Finalized, without having to reevaluate the value.
+//
+// - Test closedness far more thoroughly.
+//
+
+// Stats holds counters collected during evaluation. DisjunctCount and
+// UnifyCount count evaluation operations; Freed, Retained, Reused, and
+// Allocs track the nodeContext lifecycle (see Leaks).
+type Stats struct {
+ DisjunctCount int
+ UnifyCount int
+
+ Freed int
+ Retained int
+ Reused int
+ Allocs int
+}
+
+// Leaks reports the number of nodeContext structs leaked. These are typically
+// benign, as they will just be garbage collected, as long as the pointer from
+// the original nodes has been eliminated or the original nodes are also not
+// referred to. But Leaks may have notable impact on performance, and thus
+// should be avoided.
+func (s *Stats) Leaks() int {
+ // Every context is either allocated fresh or reused from the free list;
+ // whatever was never freed back is considered leaked.
+ return s.Allocs + s.Reused - s.Freed
+}
+
+// stats is the template used by Stats.String to render the counters.
+// The leading {{"" -}} trims the initial newline of the literal.
+var stats = template.Must(template.New("stats").Parse(`{{"" -}}
+
+Leaks: {{.Leaks}}
+Freed: {{.Freed}}
+Reused: {{.Reused}}
+Allocs: {{.Allocs}}
+Retain: {{.Retained}}
+
+Unifications: {{.UnifyCount}}
+Disjuncts: {{.DisjunctCount}}`))
+
+// String renders the counters using the stats template. The template
+// execution error is ignored; the template is static and s is the only data.
+func (s *Stats) String() string {
+ buf := &strings.Builder{}
+ _ = stats.Execute(buf, s)
+ return buf.String()
+}
+
+// Stats returns the evaluation counters accumulated on this context.
+// The returned pointer aliases the context's internal state.
+func (c *OpContext) Stats() *Stats {
+ return &c.stats
+}
+
+// TODO: Note: NewContext takes essentially a cue.Value. By making this
+// type more central, we can perhaps avoid context creation.
+
+// func NewContext(r Runtime, v *Vertex) *OpContext {
+// e := NewUnifier(r)
+// return e.NewContext(v)
+// }
+
+// structSentinel is a shared marker value used where a StructMarker instance
+// is needed but identity does not matter.
+var structSentinel = &StructMarker{}
+
+// incompleteSentinel is the fallback incomplete error used when no more
+// specific incomplete error is available (see incompleteErrors).
+var incompleteSentinel = &Bottom{
+ Code: IncompleteError,
+ Err: errors.Newf(token.NoPos, "incomplete"),
+}
+
+// evaluate returns the evaluated value associated with v. It may return a
+// partial result. That is, if v was not yet unified, it may return a
+// concrete value that must be the result assuming the configuration has no
+// errors.
+//
+// This semantics allows CUE to break reference cycles in a straightforward
+// manner.
+//
+// Vertex v must still be evaluated at some point to catch the underlying
+// error.
+//
+// TODO: return *Vertex
+func (c *OpContext) evaluate(v *Vertex, state VertexStatus) Value {
+ if v.isUndefined() {
+ // Use node itself to allow for cycle detection.
+ c.Unify(v, state)
+ }
+
+ // Prefer in-progress state: a hard error wins; a scalar found while the
+ // node still holds the cycle placeholder breaks the cycle.
+ if n := v.state; n != nil {
+ if n.errs != nil && !n.errs.IsIncomplete() {
+ return n.errs
+ }
+ if n.scalar != nil && isCyclePlaceholder(v.BaseValue) {
+ return n.scalar
+ }
+ }
+
+ switch x := v.BaseValue.(type) {
+ case *Bottom:
+ if x.IsIncomplete() {
+ // Record the incomplete error on the context and signal "no
+ // value yet" to the caller.
+ c.AddBottom(x)
+ return nil
+ }
+ return x
+
+ case nil:
+ if v.state != nil {
+ switch x := v.state.getValidators().(type) {
+ case Value:
+ return x
+ default:
+ // Wrap the non-Value BaseValue in a copy of the vertex.
+ w := *v
+ w.BaseValue = x
+ return &w
+ }
+ }
+ Assertf(false, "no BaseValue: state: %v; requested: %v", v.status, state)
+ }
+
+ if v.status < Finalized && v.state != nil {
+ // TODO: errors are slightly better if we always add addNotify, but
+ // in this case it is less likely to cause a performance penalty.
+ // See https://cuelang.org/issue/661. It may be possible to
+ // relax this again once we have proper tests to prevent regressions of
+ // that issue.
+ if !v.state.done() || v.state.errs != nil {
+ v.state.addNotify(c.vertex)
+ }
+ }
+
+ return v
+}
+
+// Unify fully unifies all values of a Vertex to completion and stores
+// the result in the Vertex. If unify was called on v before it returns
+// the cached results.
+//
+// The switch below is a state machine on v.Status(): case 0 initializes a
+// fresh node and deliberately falls through to Partial, which does the main
+// expansion and disjunction processing; AllArcs only completes child arcs;
+// Finalized is a no-op.
+func (c *OpContext) Unify(v *Vertex, state VertexStatus) {
+ // defer c.PopVertex(c.PushVertex(v))
+ if Debug {
+ c.nest++
+ c.Logf(v, "Unify")
+ defer func() {
+ c.Logf(v, "END Unify")
+ c.nest--
+ }()
+ }
+
+ // Ensure a node will always have a nodeContext after calling Unify if it is
+ // not yet Finalized.
+ n := v.getNodeContext(c)
+ defer v.freeNode(n)
+
+ if state <= v.Status() {
+ if v.Status() != Partial && state != Partial {
+ return
+ }
+ }
+
+ switch v.Status() {
+ case Evaluating:
+ // Re-entrant call during evaluation: only insert pending conjuncts.
+ n.insertConjuncts()
+ return
+
+ case EvaluatingArcs:
+ Assertf(v.status > 0, "unexpected status %d", v.status)
+ return
+
+ case 0:
+ // First evaluation of this node.
+ if v.Label.IsDef() {
+ v.Closed = true
+ }
+
+ if v.Parent != nil {
+ if v.Parent.Closed {
+ v.Closed = true
+ }
+ }
+
+ // Insert matching pattern constraints from the parent's structs.
+ if p := v.Parent; p != nil && p.state != nil && v.Label.IsString() {
+ for _, s := range p.state.node.Structs {
+ if s.Disable {
+ continue
+ }
+ s.MatchAndInsert(n.ctx, v)
+ }
+ }
+
+ if !n.checkClosed(state) {
+ return
+ }
+
+ defer c.PopArc(c.PushArc(v))
+
+ c.stats.UnifyCount++
+
+ // Clear any remaining error.
+ if err := c.Err(); err != nil {
+ panic("uncaught error")
+ }
+
+ // Set the cache to a cycle error to ensure a cyclic reference will result
+ // in an error if applicable. A cyclic error may be ignored for
+ // non-expression references. The cycle error may also be removed as soon
+ // as there is evidence what a correct value must be, but before all
+ // validation has taken place.
+ //
+ // TODO(cycle): having a more recursive algorithm would make this
+ // special cycle handling unnecessary.
+ v.BaseValue = cycle
+
+ v.UpdateStatus(Evaluating)
+
+ n.conjuncts = v.Conjuncts
+ n.insertConjuncts()
+
+ fallthrough
+
+ case Partial:
+ defer c.PopArc(c.PushArc(v))
+
+ v.status = Evaluating
+
+ // Use maybeSetCache for cycle breaking
+ for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() {
+ }
+
+ n.doNotify()
+
+ if !n.done() {
+ switch {
+ case len(n.disjunctions) > 0 && isCyclePlaceholder(v.BaseValue):
+ // We disallow entering computations of disjunctions with
+ // incomplete data.
+ if state == Finalized {
+ b := c.NewErrf("incomplete cause disjunction")
+ b.Code = IncompleteError
+ n.errs = CombineErrors(nil, n.errs, b)
+ v.SetValue(n.ctx, Finalized, b)
+ } else {
+ n.node.UpdateStatus(Partial)
+ }
+ return
+
+ case state <= AllArcs:
+ n.node.UpdateStatus(Partial)
+ return
+ }
+ }
+
+ if s := v.Status(); state <= s {
+ // We have found a partial result. There may still be errors
+ // down the line which may result from further evaluating this
+ // field, but that will be caught when evaluating this field
+ // for real.
+
+ // This also covers the case where a recursive evaluation triggered
+ // this field to become finalized in the mean time. In that case
+ // we can avoid running another expandDisjuncts.
+ return
+ }
+
+ // Disjunctions should always be finalized. If there are nested
+ // disjunctions the last one should be finalized.
+ disState := state
+ if len(n.disjunctions) > 0 && disState != Finalized {
+ disState = Finalized
+ }
+ n.expandDisjuncts(disState, n, maybeDefault, false, true)
+
+ n.finalizeDisjuncts()
+
+ switch len(n.disjuncts) {
+ case 0:
+ case 1:
+ // A single remaining disjunct replaces the node wholesale.
+ x := n.disjuncts[0].result
+ x.state = nil
+ *v = x
+
+ default:
+ d := n.createDisjunct()
+ v.BaseValue = d
+ // The conjuncts will have too much information. Better have no
+ // information than incorrect information.
+ for _, d := range d.Values {
+ // We clear the conjuncts for now. As these disjuncts are for API
+ // use only, we will fill them out when necessary (using Defaults).
+ d.Conjuncts = nil
+
+ // TODO: use a more principled form of dereferencing. For instance,
+ // disjuncts could already be assumed to be the given Vertex, and
+ // the the main vertex could be dereferenced during evaluation.
+ for _, a := range d.Arcs {
+ for _, x := range a.Conjuncts {
+ // All the environments for embedded structs need to be
+ // dereferenced.
+ for env := x.Env; env != nil && env.Vertex == v; env = env.Up {
+ env.Vertex = d
+ }
+ }
+ }
+ }
+ v.Arcs = nil
+ // v.Structs = nil // TODO: should we keep or discard the Structs?
+ // TODO: how to represent closedness information? Do we need it?
+ }
+
+ // If the state has changed, it is because a disjunct has been run, or
+ // because a single disjunct has replaced it. Restore the old state as
+ // to not confuse memory management.
+ v.state = n
+
+ // We don't do this in postDisjuncts, as it should only be done after
+ // completing all disjunctions.
+ if !n.done() {
+ if err := n.incompleteErrors(); err != nil {
+ b, _ := n.node.BaseValue.(*Bottom)
+ if b != err {
+ err = CombineErrors(n.ctx.src, b, err)
+ }
+ n.node.BaseValue = err
+ }
+ }
+
+ if state != Finalized {
+ return
+ }
+
+ if v.BaseValue == nil {
+ v.BaseValue = n.getValidators()
+ }
+
+ // Free memory here?
+ v.UpdateStatus(Finalized)
+
+ case AllArcs:
+ if !n.checkClosed(state) {
+ break
+ }
+
+ defer c.PopArc(c.PushArc(v))
+
+ n.completeArcs(state)
+
+ case Finalized:
+ }
+}
+
+// insertConjuncts inserts conjuncts previously uninserted. The queue in
+// n.conjuncts is consumed front-to-back; addExprConjunct may itself append
+// more work, which is why the loop re-checks the length each iteration.
+func (n *nodeContext) insertConjuncts() {
+ for len(n.conjuncts) > 0 {
+ nInfos := len(n.node.Structs)
+ p := &n.conjuncts[0]
+ n.conjuncts = n.conjuncts[1:]
+ n.addExprConjunct(*p)
+
+ // Record the OptionalTypes for all structs that were inferred by this
+ // Conjunct. This information can be used by algorithms such as trim.
+ for i := nInfos; i < len(n.node.Structs); i++ {
+ p.CloseInfo.FieldTypes |= n.node.Structs[i].types
+ }
+ }
+}
+
+// finalizeDisjuncts: incomplete errors are kept around and not removed early.
+// This call filters the incomplete errors and removes them
+//
+// This also collects all errors of empty disjunctions. These cannot be
+// collected during the finalization state of individual disjuncts. Care should
+// be taken to only call this after all disjuncts have been finalized.
+func (n *nodeContext) finalizeDisjuncts() {
+ a := n.disjuncts
+ if len(a) == 0 {
+ return
+ }
+ // Compact finished disjuncts to the front of a; k counts them.
+ k := 0
+ for i, d := range a {
+ switch d.finalDone() {
+ case true:
+ a[k], a[i] = d, a[k]
+ k++
+ default:
+ if err := d.incompleteErrors(); err != nil {
+ n.disjunctErrs = append(n.disjunctErrs, err)
+ }
+ }
+ d.free()
+ }
+ // No disjunct survived: report the combined disjunction error.
+ if k == 0 {
+ n.makeError()
+ }
+ n.disjuncts = a[:k]
+}
+
+// doNotify propagates accumulated errors to all vertices registered via
+// addNotify (used for cyclic dependencies), then clears the notify list.
+func (n *nodeContext) doNotify() {
+ if n.errs == nil || len(n.notify) == 0 {
+ return
+ }
+ for _, v := range n.notify {
+ if v.state == nil {
+ // Target has no active context: merge the error into its value.
+ if b, ok := v.BaseValue.(*Bottom); ok {
+ v.BaseValue = CombineErrors(nil, b, n.errs)
+ } else {
+ v.BaseValue = n.errs
+ }
+ } else {
+ v.state.addBottom(n.errs)
+ }
+ }
+ n.notify = n.notify[:0]
+}
+
+// postDisjunct completes evaluation of a single (non-disjunctive) value:
+// it drains remaining work, resolves the node's kind/value, runs bound and
+// validator checks on concrete values, and finally processes child arcs via
+// completeArcs.
+func (n *nodeContext) postDisjunct(state VertexStatus) {
+ ctx := n.ctx
+
+ for {
+ // Use maybeSetCache for cycle breaking
+ for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() {
+ }
+
+ // Adding lists may unblock more expressions, so loop until neither
+ // expansion nor list addition makes progress.
+ if aList, id := n.addLists(); aList != nil {
+ n.updateNodeType(ListKind, aList, id)
+ } else {
+ break
+ }
+ }
+
+ if n.aStruct != nil {
+ n.updateNodeType(StructKind, n.aStruct, n.aStructID)
+ }
+
+ switch err := n.getErr(); {
+ case err != nil:
+ n.node.BaseValue = err
+ n.errs = nil
+
+ default:
+ if isCyclePlaceholder(n.node.BaseValue) {
+ if !n.done() {
+ n.node.BaseValue = n.incompleteErrors()
+ } else {
+ n.node.BaseValue = nil
+ }
+ }
+ // TODO: this ideally should be done here. However, doing so causes
+ // a somewhat more aggressive cutoff in disjunction cycles, which cause
+ // some incompatibilities. Fix in another CL.
+ //
+ // else if !n.done() {
+ // n.expandOne()
+ // if err := n.incompleteErrors(); err != nil {
+ // n.node.BaseValue = err
+ // }
+ // }
+
+ // We are no longer evaluating.
+ // n.node.UpdateStatus(Partial)
+ n.node.UpdateStatus(Evaluating)
+
+ // Either set to Conjunction or error.
+ // TODO: verify and simplify the below code to determine whether
+ // something is a struct.
+ markStruct := false
+ if n.aStruct != nil {
+ markStruct = true
+ } else if len(n.node.Structs) > 0 {
+ markStruct = n.kind&StructKind != 0 && !n.hasTop
+ }
+ v := n.node.Value()
+ if n.node.BaseValue == nil && markStruct {
+ n.node.BaseValue = &StructMarker{}
+ v = n.node
+ }
+ if v != nil && IsConcrete(v) {
+ // Also check when we already have errors as we may find more
+ // serious errors and would like to know about all errors anyway.
+
+ if n.lowerBound != nil {
+ if b := ctx.Validate(n.lowerBound, v); b != nil {
+ // TODO(errors): make Validate return boolean and generate
+ // optimized conflict message. Also track and inject IDs
+ // to determine origin location.s
+ if e, _ := b.Err.(*ValueError); e != nil {
+ e.AddPosition(n.lowerBound)
+ e.AddPosition(v)
+ }
+ n.addBottom(b)
+ }
+ }
+ if n.upperBound != nil {
+ if b := ctx.Validate(n.upperBound, v); b != nil {
+ // TODO(errors): make Validate return boolean and generate
+ // optimized conflict message. Also track and inject IDs
+ // to determine origin location.s
+ if e, _ := b.Err.(*ValueError); e != nil {
+ e.AddPosition(n.upperBound)
+ e.AddPosition(v)
+ }
+ n.addBottom(b)
+ }
+ }
+ // MOVE BELOW
+ // TODO(perf): only delay processing of actual non-monotonic checks.
+ skip := n.skipNonMonotonicChecks()
+ if v := n.node.Value(); v != nil && IsConcrete(v) && !skip {
+ for _, v := range n.checks {
+ // TODO(errors): make Validate return bottom and generate
+ // optimized conflict message. Also track and inject IDs
+ // to determine origin location.s
+ if b := ctx.Validate(v, n.node); b != nil {
+ n.addBottom(b)
+ }
+ }
+ }
+ } else if state == Finalized {
+ n.node.BaseValue = n.getValidators()
+ }
+
+ if v == nil {
+ break
+ }
+
+ switch {
+ case v.Kind() == ListKind:
+ for _, a := range n.node.Arcs {
+ if a.Label.Typ() == StringLabel {
+ n.addErr(ctx.Newf("list may not have regular fields"))
+ // TODO(errors): add positions for list and arc definitions.
+
+ }
+ }
+
+ // case !isStruct(n.node) && v.Kind() != BottomKind:
+ // for _, a := range n.node.Arcs {
+ // if a.Label.IsRegular() {
+ // n.addErr(errors.Newf(token.NoPos,
+ // // TODO(errors): add positions of non-struct values and arcs.
+ // "cannot combine scalar values with arcs"))
+ // }
+ // }
+ }
+ }
+
+ if err := n.getErr(); err != nil {
+ if b, _ := n.node.BaseValue.(*Bottom); b != nil {
+ err = CombineErrors(nil, b, err)
+ }
+ n.node.BaseValue = err
+ // TODO: add return: if evaluation of arcs is important it can be done
+ // later. Logically we're done.
+ }
+
+ n.completeArcs(state)
+}
+
+// incompleteErrors combines the incomplete errors of all pending dynamic
+// fields, comprehensions, and expressions. It never returns nil: if no
+// specific error was recorded, incompleteSentinel is returned.
+func (n *nodeContext) incompleteErrors() *Bottom {
+ // collect incomplete errors.
+ var err *Bottom // n.incomplete
+ for _, d := range n.dynamicFields {
+ err = CombineErrors(nil, err, d.err)
+ }
+ for _, c := range n.comprehensions {
+ err = CombineErrors(nil, err, c.err)
+ }
+ for _, x := range n.exprs {
+ err = CombineErrors(nil, err, x.err)
+ }
+ if err == nil {
+ // safeguard.
+ err = incompleteSentinel
+ }
+ return err
+}
+
+// TODO(perf): ideally we should always perform a closedness check if
+// state is Finalized. This is currently not possible when computing a
+// partial disjunction as the closedness information is not yet
+// complete, possibly leading to a disjunct to be rejected prematurely.
+// It is probably possible to fix this if we could add StructInfo
+// structures demarked per conjunct.
+//
+// In practice this should not be a problem: when disjuncts originate
+// from the same disjunct, they will have the same StructInfos, and thus
+// Equal is able to equate them even in the precense of optional field.
+// In general, combining any limited set of disjuncts will soon reach
+// a fixed point where duplicate elements can be eliminated this way.
+//
+// Note that not checking closedness is irrelevant for disjunctions of
+// scalars. This means it also doesn't hurt performance where structs
+// have a discriminator field (e.g. Kubernetes). We should take care,
+// though, that any potential performance issues are eliminated for
+// Protobuf-like oneOf fields.
+//
+// checkClosed reports whether the node is allowed in its parent per
+// closedness rules; on violation it records the error on the node and
+// returns false.
+func (n *nodeContext) checkClosed(state VertexStatus) bool {
+ ignore := state != Finalized || n.skipNonMonotonicChecks()
+
+ v := n.node
+ if !v.Label.IsInt() && v.Parent != nil && !ignore {
+ ctx := n.ctx
+ // Visit arcs recursively to validate and compute error.
+ if _, err := verifyArc2(ctx, v.Label, v, v.Closed); err != nil {
+ // Record error in child node to allow recording multiple
+ // conflicts at the appropriate place, to allow valid fields to
+ // be represented normally and, most importantly, to avoid
+ // recursive processing of a disallowed field.
+ v.SetValue(ctx, Finalized, err)
+ return false
+ }
+ }
+ return true
+}
+
+// completeArcs recursively unifies all child arcs of the node, checks
+// non-monotonic (if-clause) cycle conditions, and updates the node status
+// up to the requested state.
+func (n *nodeContext) completeArcs(state VertexStatus) {
+ if DebugSort > 0 {
+ DebugSortArcs(n.ctx, n.node)
+ }
+
+ if state <= AllArcs {
+ n.node.UpdateStatus(AllArcs)
+ return
+ }
+
+ n.node.UpdateStatus(EvaluatingArcs)
+
+ ctx := n.ctx
+
+ if !assertStructuralCycle(n) {
+ // Visit arcs recursively to validate and compute error.
+ for _, a := range n.node.Arcs {
+ if a.nonMonotonicInsertGen >= a.nonMonotonicLookupGen && a.nonMonotonicLookupGen > 0 {
+ err := ctx.Newf(
+ "cycle: field inserted by if clause that was previously evaluated by another if clause: %s", a.Label)
+ err.AddPosition(n.node)
+ n.node.BaseValue = &Bottom{Err: err}
+ } else if a.nonMonotonicReject {
+ err := ctx.Newf(
+ "cycle: field was added after an if clause evaluated it: %s",
+ a.Label)
+ err.AddPosition(n.node)
+ n.node.BaseValue = &Bottom{Err: err}
+ }
+
+ // Call UpdateStatus here to be absolutely sure the status is set
+ // correctly and that we are not regressing.
+ n.node.UpdateStatus(EvaluatingArcs)
+ ctx.Unify(a, state)
+ // Don't set the state to Finalized if the child arcs are not done.
+ if state == Finalized && a.status < Finalized {
+ state = AllArcs
+ }
+ if err, _ := a.BaseValue.(*Bottom); err != nil {
+ n.node.AddChildError(err)
+ }
+ }
+ }
+
+ n.node.UpdateStatus(state)
+}
+
+// assertStructuralCycle reports whether the node is part of a structural
+// cycle (all conjuncts cyclic, none non-cyclic); if so, it records a
+// StructuralCycleError on the node and drops its arcs.
+func assertStructuralCycle(n *nodeContext) bool {
+ if cyclic := n.hasCycle && !n.hasNonCycle; cyclic {
+ n.node.BaseValue = CombineErrors(nil,
+ n.node.Value(),
+ &Bottom{
+ Code: StructuralCycleError,
+ Err: n.ctx.Newf("structural cycle"),
+ Value: n.node.Value(),
+ // TODO: probably, this should have the referenced arc.
+ })
+ // Don't process Arcs. This is mostly to ensure that no Arcs with
+ // an Unprocessed status remain in the output.
+ n.node.Arcs = nil
+ return true
+ }
+ return false
+}
+
+// TODO: this is now a sentinel. Use a user-facing error that traces where
+// the cycle originates.
+var cycle = &Bottom{
+ Err: errors.Newf(token.NoPos, "cycle error"),
+ Code: CycleError,
+}
+
+// isCyclePlaceholder reports whether v is the cycle sentinel installed by
+// Unify; identity comparison is intentional.
+func isCyclePlaceholder(v BaseValue) bool {
+ return v == cycle
+}
+
+// createDisjunct assembles a Disjunction value from the remaining disjuncts,
+// moving default disjuncts to the front. p counts the defaults; hasDefaults
+// records whether any disjunct carried explicit default marking.
+func (n *nodeContext) createDisjunct() *Disjunction {
+ a := make([]*Vertex, len(n.disjuncts))
+ p := 0
+ hasDefaults := false
+ for i, x := range n.disjuncts {
+ v := new(Vertex)
+ *v = x.result
+ v.state = nil
+ switch x.defaultMode {
+ case isDefault:
+ // Swap the default to position p, keeping earlier entries.
+ a[i] = a[p]
+ a[p] = v
+ p++
+ hasDefaults = true
+
+ case notDefault:
+ hasDefaults = true
+ fallthrough
+ case maybeDefault:
+ a[i] = v
+ }
+ }
+ // TODO: disambiguate based on concrete values.
+ // TODO: consider not storing defaults.
+ // if p > 0 {
+ // a = a[:p]
+ // }
+ return &Disjunction{
+ Values: a,
+ NumDefaults: p,
+ HasDefaults: hasDefaults,
+ }
+}
+
+// arcKey identifies an arc together with the CloseInfo under which it was
+// processed; used in nodeContext.arcMap for deduplication.
+type arcKey struct {
+ arc *Vertex
+ id CloseInfo
+}
+
+// A nodeContext is used to collate all conjuncts of a value to facilitate
+// unification. Conceptually order of unification does not matter. However,
+// order has relevance when performing checks of non-monotic properities. Such
+// checks should only be performed once the full value is known.
+type nodeContext struct {
+ // nextFree links this context into the OpContext free list; refCount
+ // tracks outstanding users (see getNodeContext/freeNode).
+ nextFree *nodeContext
+ refCount int
+
+ ctx *OpContext
+ node *Vertex
+
+ // usedArcs is a list of arcs that were looked up during non-monotonic operations, but do not exist yet.
+ usedArcs []*Vertex
+
+ // TODO: (this is CL is first step)
+ // filter *Vertex a subset of composite with concrete fields for
+ // bloom-like filtering of disjuncts. We should first verify, however,
+ // whether some breath-first search gives sufficient performance, as this
+ // should already ensure a quick-fail for struct disjunctions with
+ // discriminators.
+
+ arcMap []arcKey
+
+ // snapshot holds the last value of the vertex before calling postDisjunct.
+ snapshot Vertex
+
+ // Result holds the last evaluated value of the vertex after calling
+ // postDisjunct.
+ result Vertex
+
+ // Current value (may be under construction)
+ scalar Value // TODO: use Value in node.
+ scalarID CloseInfo
+
+ // Concrete conjuncts
+ kind Kind
+ kindExpr Expr // expr that adjust last value (for error reporting)
+ kindID CloseInfo // for error tracing
+ lowerBound *BoundValue // > or >=
+ upperBound *BoundValue // < or <=
+ checks []Validator // BuiltinValidator, other bound values.
+ errs *Bottom
+
+ // Conjuncts holds a reference to the Vertex Arcs that still need
+ // processing. It does NOT need to be copied.
+ conjuncts []Conjunct
+
+ // notify is used to communicate errors in cyclic dependencies.
+ // TODO: also use this to communicate increasingly more concrete values.
+ notify []*Vertex
+
+ // Struct information
+ dynamicFields []envDynamic
+ comprehensions []envYield
+ aStruct Expr
+ aStructID CloseInfo
+
+ // Expression conjuncts
+ lists []envList
+ vLists []*Vertex
+ exprs []envExpr
+
+ hasTop bool
+ hasCycle bool // has conjunct with structural cycle
+ hasNonCycle bool // has conjunct without structural cycle
+
+ // Disjunction handling
+ disjunctions []envDisjunct
+
+ // usedDefault indicates the for each of possibly multiple parent
+ // disjunctions whether it is unified with a default disjunct or not.
+ // This is then later used to determine whether a disjunction should
+ // be treated as a marked disjunction.
+ usedDefault []defaultInfo
+
+ defaultMode defaultMode
+ disjuncts []*nodeContext
+ buffer []*nodeContext
+ disjunctErrs []*Bottom
+}
+
+// defaultInfo records, per parent disjunction, how default markings
+// propagated into this node (see nodeContext.usedDefault).
+type defaultInfo struct {
+ // parentMode indicates whether this values was used as a default value,
+ // based on the parent mode.
+ parentMode defaultMode
+
+ // The result of default evaluation for a nested disjunction.
+ nestedMode defaultMode
+
+ origMode defaultMode
+}
+
+// addNotify registers v to be informed of errors on this node (see
+// doNotify). A nil vertex is ignored.
+func (n *nodeContext) addNotify(v *Vertex) {
+ if v != nil {
+ n.notify = append(n.notify, v)
+ }
+}
+
+// clone creates a copy of n (backed by a fresh or recycled nodeContext) for
+// evaluating a disjunct. Slice fields are copied; disjunction bookkeeping
+// (disjunctions, disjuncts, buffer, disjunctErrs) is deliberately not cloned.
+func (n *nodeContext) clone() *nodeContext {
+ d := n.ctx.newNodeContext(n.node)
+
+ d.refCount++
+
+ d.ctx = n.ctx
+ d.node = n.node
+
+ d.scalar = n.scalar
+ d.scalarID = n.scalarID
+ d.kind = n.kind
+ d.kindExpr = n.kindExpr
+ d.kindID = n.kindID
+ d.aStruct = n.aStruct
+ d.aStructID = n.aStructID
+ d.hasTop = n.hasTop
+
+ d.lowerBound = n.lowerBound
+ d.upperBound = n.upperBound
+ d.errs = n.errs
+ d.hasTop = n.hasTop // duplicate of the assignment above; harmless
+ d.hasCycle = n.hasCycle
+ d.hasNonCycle = n.hasNonCycle
+
+ // d.arcMap = append(d.arcMap, n.arcMap...) // XXX add?
+ // d.usedArcs = append(d.usedArcs, n.usedArcs...) // XXX: add?
+ d.notify = append(d.notify, n.notify...)
+ d.checks = append(d.checks, n.checks...)
+ d.dynamicFields = append(d.dynamicFields, n.dynamicFields...)
+ d.comprehensions = append(d.comprehensions, n.comprehensions...)
+ d.lists = append(d.lists, n.lists...)
+ d.vLists = append(d.vLists, n.vLists...)
+ d.exprs = append(d.exprs, n.exprs...)
+ d.usedDefault = append(d.usedDefault, n.usedDefault...)
+
+ // No need to clone d.disjunctions
+
+ return d
+}
+
+// newNodeContext returns a nodeContext for node, reusing one from the free
+// list when available (resetting it while keeping slice capacity) and
+// allocating otherwise. Allocation/reuse counts are tracked in c.stats.
+func (c *OpContext) newNodeContext(node *Vertex) *nodeContext {
+ if n := c.freeListNode; n != nil {
+ c.stats.Reused++
+ c.freeListNode = n.nextFree
+
+ // Reset all fields; the [:0] re-slices keep the allocated capacity.
+ *n = nodeContext{
+ ctx: c,
+ node: node,
+ kind: TopKind,
+ usedArcs: n.usedArcs[:0],
+ arcMap: n.arcMap[:0],
+ notify: n.notify[:0],
+ checks: n.checks[:0],
+ dynamicFields: n.dynamicFields[:0],
+ comprehensions: n.comprehensions[:0],
+ lists: n.lists[:0],
+ vLists: n.vLists[:0],
+ exprs: n.exprs[:0],
+ disjunctions: n.disjunctions[:0],
+ usedDefault: n.usedDefault[:0],
+ disjunctErrs: n.disjunctErrs[:0],
+ disjuncts: n.disjuncts[:0],
+ buffer: n.buffer[:0],
+ }
+
+ return n
+ }
+ c.stats.Allocs++
+
+ return &nodeContext{
+ ctx: c,
+ node: node,
+ kind: TopKind,
+ }
+}
+
+// getNodeContext returns the nodeContext for v, creating one if needed and
+// incrementing its reference count. It returns nil for a finalized vertex
+// without state. Panics if v's state is paired with a different vertex.
+func (v *Vertex) getNodeContext(c *OpContext) *nodeContext {
+ if v.state == nil {
+ if v.status == Finalized {
+ return nil
+ }
+ v.state = c.newNodeContext(v)
+ } else if v.state.node != v {
+ panic("getNodeContext: nodeContext out of sync")
+ }
+ v.state.refCount++
+ return v.state
+}
+
+// freeNode releases one reference to n (paired with getNodeContext). When
+// the count reaches zero, the state is returned to the free list if the
+// vertex is finalized; otherwise it is retained (and counted in stats).
+func (v *Vertex) freeNode(n *nodeContext) {
+ if n == nil {
+ return
+ }
+ if n.node != v {
+ panic("freeNode: unpaired free")
+ }
+ if v.state != nil && v.state != n {
+ panic("freeNode: nodeContext out of sync")
+ }
+ if n.refCount--; n.refCount == 0 {
+ if v.status == Finalized {
+ v.freeNodeState()
+ } else {
+ n.ctx.stats.Retained++
+ }
+ }
+}
+
+// freeNodeState detaches v's nodeContext and returns it to the context's
+// free list. No-op if there is no state.
+func (v *Vertex) freeNodeState() {
+ if v.state == nil {
+ return
+ }
+ state := v.state
+ v.state = nil
+
+ state.ctx.freeNodeContext(state)
+}
+
+// free drops one reference to n, recycling it when no references remain.
+func (n *nodeContext) free() {
+ if n.refCount--; n.refCount == 0 {
+ n.ctx.freeNodeContext(n)
+ }
+}
+
+// freeNodeContext pushes n onto the context's free list for reuse by
+// newNodeContext, clearing its node link and reference count.
+func (c *OpContext) freeNodeContext(n *nodeContext) {
+ c.stats.Freed++
+ n.nextFree = c.freeListNode
+ c.freeListNode = n
+ n.node = nil
+ n.refCount = 0
+}
+
+// TODO(perf): return a dedicated ConflictError that can track original
+// positions on demand.
+//
+// reportConflict records a "conflicting values" error for v1 and v2. When
+// the kinds differ, the message also names the mismatched types. Positions
+// of both values and of any provided CloseInfos are attached.
+func (n *nodeContext) reportConflict(
+ v1, v2 Node,
+ k1, k2 Kind,
+ ids ...CloseInfo) {
+
+ ctx := n.ctx
+
+ var err *ValueError
+ if k1 == k2 {
+ err = ctx.NewPosf(token.NoPos, "conflicting values %s and %s", v1, v2)
+ } else {
+ err = ctx.NewPosf(token.NoPos,
+ "conflicting values %s and %s (mismatched types %s and %s)",
+ v1, v2, k1, k2)
+ }
+
+ err.AddPosition(v1)
+ err.AddPosition(v2)
+ for _, id := range ids {
+ err.AddClosedPositions(id)
+ }
+
+ n.addErr(err)
+}
+
+// reportFieldMismatch reports the mixture of regular fields with non-struct
+// values. Either s or f needs to be given.
+func (n *nodeContext) reportFieldMismatch(
+ p token.Pos,
+ s *StructLit,
+ f Feature,
+ scalar Expr,
+ id ...CloseInfo) {
+
+ ctx := n.ctx
+
+ if f == InvalidLabel {
+ // No field was given: find the first regular field in s to name in
+ // the error.
+ for _, a := range s.Decls {
+ if x, ok := a.(*Field); ok && x.Label.IsRegular() {
+ f = x.Label
+ p = pos(x)
+ break
+ }
+ }
+ if f == InvalidLabel {
+ // No regular field either: fall back to a generic conflict.
+ n.reportConflict(scalar, s, n.kind, StructKind, id...)
+ return
+ }
+ }
+
+ err := ctx.NewPosf(p, "cannot combine regular field %q with %v", f, scalar)
+
+ if s != nil {
+ err.AddPosition(s)
+ }
+
+ for _, ci := range id {
+ err.AddClosedPositions(ci)
+ }
+
+ n.addErr(err)
+}
+
+// updateNodeType intersects the node's kind with k, reporting a conflict
+// when the intersection is empty. It records v as the expression responsible
+// for the current kind (for error messages) and reports whether the
+// resulting kind is still non-bottom.
+func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool {
+ ctx := n.ctx
+ kind := n.kind & k
+
+ switch {
+ case n.kind == BottomKind,
+ k == BottomKind:
+ return false
+
+ case kind != BottomKind:
+ // Intersection is non-empty: no error.
+
+ // TODO: we could consider changing the reporting for structs, but this
+ // makes only sense in case they are for embeddings. Otherwise the type
+ // of a struct is more relevant for the failure.
+ // case k == StructKind:
+ // s, _ := v.(*StructLit)
+ // n.reportFieldMismatch(token.NoPos, s, 0, n.kindExpr, id, n.kindID)
+
+ case n.kindExpr != nil:
+ n.reportConflict(n.kindExpr, v, n.kind, k, n.kindID, id)
+
+ default:
+ n.addErr(ctx.Newf(
+ "conflicting value %s (mismatched types %s and %s)",
+ v, n.kind, k))
+ }
+
+ if n.kind != kind || n.kindExpr == nil {
+ n.kindExpr = v
+ }
+ n.kind = kind
+ return kind != BottomKind
+}
+
+// done reports whether no dynamic fields, comprehensions, or expressions
+// remain to be processed.
+func (n *nodeContext) done() bool {
+ return len(n.dynamicFields) == 0 &&
+ len(n.comprehensions) == 0 &&
+ len(n.exprs) == 0
+}
+
+// finalDone is like done, but allows for cycle errors, which can be ignored
+// as they essentially indicate a = a & _.
+func (n *nodeContext) finalDone() bool {
+ // Any pending expression with a non-cycle error means we are not done.
+ for _, x := range n.exprs {
+ if x.err.Code != CycleError {
+ return false
+ }
+ }
+ return len(n.dynamicFields) == 0 && len(n.comprehensions) == 0
+}
+
+// hasErr is used to determine if an evaluation path, for instance a single
+// path after expanding all disjunctions, has an error.
+func (n *nodeContext) hasErr() bool {
+ if n.node.ChildErrors != nil {
+ return true
+ }
+ // IsErr is only meaningful once evaluation has progressed past Evaluating.
+ if n.node.Status() > Evaluating && n.node.IsErr() {
+ return true
+ }
+ return n.ctx.HasErr() || n.errs != nil
+}
+
+// getErr folds any pending context error into n.errs and returns the
+// combined error (nil if there is none).
+func (n *nodeContext) getErr() *Bottom {
+ n.errs = CombineErrors(nil, n.errs, n.ctx.Err())
+ return n.errs
+}
+
+// getValidators sets the vertex' Value in case there was no concrete value.
+// It combines bounds, checks, and (when the accumulated validators do not
+// fully cover it) the node's basic type into a single BaseValue: a lone
+// BasicType, a single validator, or a Conjunction of them.
+func (n *nodeContext) getValidators() BaseValue {
+ ctx := n.ctx
+
+ a := []Value{}
+ // if n.node.Value != nil {
+ // a = append(a, n.node.Value)
+ // }
+ kind := TopKind
+ if n.lowerBound != nil {
+ a = append(a, n.lowerBound)
+ kind &= n.lowerBound.Kind()
+ }
+ if n.upperBound != nil {
+ a = append(a, n.upperBound)
+ kind &= n.upperBound.Kind()
+ }
+ for _, c := range n.checks {
+ // Drop !=x if x is out of bounds with another bound.
+ if b, _ := c.(*BoundValue); b != nil && b.Op == NotEqualOp {
+ if n.upperBound != nil &&
+ SimplifyBounds(ctx, n.kind, n.upperBound, b) != nil {
+ continue
+ }
+ if n.lowerBound != nil &&
+ SimplifyBounds(ctx, n.kind, n.lowerBound, b) != nil {
+ continue
+ }
+ }
+ a = append(a, c)
+ kind &= c.Kind()
+ }
+
+ // Add an explicit BasicType if the validators' combined kind does not
+ // already constrain the value to n.kind.
+ if kind&^n.kind != 0 {
+ a = append(a, &BasicType{
+ Src: n.kindExpr.Source(), // TODO:Is this always a BasicType?
+ K: n.kind,
+ })
+ }
+
+ var v BaseValue
+ switch len(a) {
+ case 0:
+ // Src is the combined input.
+ v = &BasicType{K: n.kind}
+
+ case 1:
+ v = a[0].(Value) // remove cast
+
+ default:
+ v = &Conjunction{Values: a}
+ }
+
+ return v
+}
+
+// TODO: this function can probably go as this is now handled in the nodeContext.
+//
+// maybeSetCache publishes an already-found scalar as the node's BaseValue
+// while the node is still Partial, enabling cycle breaking.
+func (n *nodeContext) maybeSetCache() {
+ if n.node.Status() > Partial { // n.node.BaseValue != nil
+ return
+ }
+ if n.scalar != nil {
+ n.node.BaseValue = n.scalar
+ }
+ // NOTE: this is now handled by associating the nodeContext
+ // if n.errs != nil {
+ // n.node.SetValue(n.ctx, Partial, n.errs)
+ // }
+}
+
+// envExpr is a deferred expression conjunct together with the incomplete
+// error explaining why it could not yet be evaluated.
+type envExpr struct {
+ c Conjunct
+ err *Bottom
+}
+
+// envDynamic is a dynamic field whose label still needs to be computed,
+// together with its environment and closedness info.
+type envDynamic struct {
+ env *Environment
+ field *DynamicField
+ id CloseInfo
+ err *Bottom
+}
+
+// envList is a list literal queued for processing in its environment.
+type envList struct {
+ env *Environment
+ list *ListLit
+ n int64 // recorded length after evaluator
+ elipsis *Ellipsis
+ id CloseInfo
+}
+
+// addBottom merges b into the node's accumulated errors.
+func (n *nodeContext) addBottom(b *Bottom) {
+ n.errs = CombineErrors(nil, n.errs, b)
+ // TODO(errors): consider doing this
+ // n.kindExpr = n.errs
+ // n.kind = 0
+}
+
+// addErr wraps a non-nil errors.Error in a Bottom and records it.
+func (n *nodeContext) addErr(err errors.Error) {
+ if err != nil {
+ n.addBottom(&Bottom{Err: err})
+ }
+}
+
+// addExprConjuncts will attempt to evaluate an Expr and insert the value
+// into the nodeContext if successful or queue it for later evaluation if it is
+// incomplete or is not value.
+func (n *nodeContext) addExprConjunct(v Conjunct) {
+ env := v.Env
+ id := v.CloseInfo
+
+ // Dispatch on the conjunct's element kind.
+ switch x := v.Elem().(type) {
+ case *Vertex:
+ if x.IsData() {
+ n.addValueConjunct(env, x, id)
+ } else {
+ n.addVertexConjuncts(v, x, true)
+ }
+
+ case Value:
+ n.addValueConjunct(env, x, id)
+
+ case *BinaryExpr:
+ if x.Op == AndOp {
+ // Unification distributes: process both operands directly.
+ n.addExprConjunct(MakeConjunct(env, x.X, id))
+ n.addExprConjunct(MakeConjunct(env, x.Y, id))
+ } else {
+ n.evalExpr(v)
+ }
+
+ case *StructLit:
+ n.addStruct(env, x, id)
+
+ case *ListLit:
+ // Lists are queued with a child environment rooted at this node.
+ childEnv := &Environment{
+ Up: env,
+ Vertex: n.node,
+ }
+ if env != nil {
+ childEnv.Cyclic = env.Cyclic
+ childEnv.Deref = env.Deref
+ }
+ n.lists = append(n.lists, envList{env: childEnv, list: x, id: id})
+
+ case *DisjunctionExpr:
+ n.addDisjunction(env, x, id)
+
+ default:
+ // Must be Resolver or Evaluator.
+ n.evalExpr(v)
+ }
+}
+
+// evalExpr is only called by addExprConjunct. It evaluates a Resolver or
+// Evaluator conjunct, adding the result to n. If an error occurs, it records
+// the error in n and returns nil. Incomplete expressions are queued in
+// n.exprs for a later pass.
+func (n *nodeContext) evalExpr(v Conjunct) {
+	// Require an Environment.
+	ctx := n.ctx
+
+	closeID := v.CloseInfo
+
+	// TODO: see if we can do without these counters.
+	for _, d := range v.Env.Deref {
+		d.EvalCount++
+	}
+	for _, d := range v.Env.Cycles {
+		d.SelfCount++
+	}
+	defer func() {
+		for _, d := range v.Env.Deref {
+			d.EvalCount--
+		}
+		for _, d := range v.Env.Cycles {
+			// Fix: unwind the increment done above. Incrementing again here
+			// would leave SelfCount permanently raised, causing
+			// addVertexConjuncts to spuriously skip self-referencing arcs
+			// (it returns early when arc.SelfCount > 0).
+			d.SelfCount--
+		}
+	}()
+
+	switch x := v.Expr().(type) {
+	case Resolver:
+		arc, err := ctx.Resolve(v.Env, x)
+		if err != nil && !err.IsIncomplete() {
+			n.addBottom(err)
+			break
+		}
+		if arc == nil {
+			// Not yet resolvable: queue for retry in a later pass.
+			n.exprs = append(n.exprs, envExpr{v, err})
+			break
+		}
+
+		n.addVertexConjuncts(v, arc, false)
+
+	case Evaluator:
+		// Interpolation, UnaryExpr, BinaryExpr, CallExpr
+		// Could be unify?
+		val := ctx.evaluateRec(v.Env, v.Expr(), Partial)
+		if b, ok := val.(*Bottom); ok && b.IsIncomplete() {
+			n.exprs = append(n.exprs, envExpr{v, b})
+			break
+		}
+
+		if v, ok := val.(*Vertex); ok {
+			// Handle generated disjunctions (as in the 'or' builtin).
+			// These come as a Vertex, but should not be added as a value.
+			b, ok := v.BaseValue.(*Bottom)
+			if ok && b.IsIncomplete() && len(v.Conjuncts) > 0 {
+				for _, c := range v.Conjuncts {
+					c.CloseInfo = closeID
+					n.addExprConjunct(c)
+				}
+				break
+			}
+		}
+
+		// TODO: also go through normal Vertex handling here. At the moment
+		// addValueConjunct handles StructMarker.NeedsClose, as this is always
+		// only needed when evaluating an Evaluator, and not a Resolver.
+		// The two code paths should ideally be merged once this separate
+		// mechanism is eliminated.
+		//
+		// if arc, ok := val.(*Vertex); ok && !arc.IsData() {
+		// 	n.addVertexConjuncts(v.Env, closeID, v.Expr(), arc)
+		// 	break
+		// }
+
+		// TODO: insert in vertex as well
+		n.addValueConjunct(v.Env, val, closeID)
+
+	default:
+		panic(fmt.Sprintf("unknown expression of type %T", x))
+	}
+}
+
+// addVertexConjuncts adds the conjuncts of the referenced arc to n, spawning
+// closedness information and guarding against reference and structural cycles.
+func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) {
+	closeInfo := c.CloseInfo
+
+	// We need to ensure that each arc is only unified once (or at least) a
+	// bounded time, with each conjunct. Comprehensions, for instance, may
+	// distribute a value across many values that get unified back into the
+	// same value. If such a value is a disjunction, then a disjunction of N
+	// disjuncts will result in a factor N more unifications for each
+	// occurrence of such value, resulting in exponential running time. This
+	// is especially common for values that are used as a type.
+	//
+	// However, unification is idempotent, so each such conjunct only needs
+	// to be unified once. This cache checks for this and prevents an
+	// exponential blowup in such case.
+	//
+	// TODO(perf): this cache ensures the conjuncts of an arc are added at
+	// most once per ID. However, we really need to add the conjuncts of an
+	// arc only once total, and then add the close information once per
+	// close ID (pointer can probably be shared). Aside from being more
+	// performant, this is probably the best way to guarantee that
+	// conjunctions are linear in this case.
+	key := arcKey{arc, closeInfo}
+	for _, k := range n.arcMap {
+		if key == k {
+			return
+		}
+	}
+	n.arcMap = append(n.arcMap, key)
+
+	env := c.Env
+	// Pass detection of structural cycles from parent to children.
+	cyclic := false
+	if env != nil {
+		// If a reference is in a tainted set, so is the value it refers to.
+		cyclic = env.Cyclic
+	}
+
+	status := arc.Status()
+
+	switch status {
+	case Evaluating:
+		// Reference cycle detected. We have reached a fixed point and
+		// adding conjuncts at this point will not change the value. Also,
+		// continuing to pursue this value will result in an infinite loop.
+
+		// TODO: add a mechanism so that the computation will only have to
+		// be done once?
+
+		if arc == n.node {
+			// TODO: we could use node sharing here. This may avoid an
+			// exponential blowup during evaluation, like is possible with
+			// YAML.
+			return
+		}
+
+	case EvaluatingArcs:
+		// Structural cycle detected. Continue evaluation as usual, but
+		// keep track of whether any other conjuncts without structural
+		// cycles are added. If not, evaluation of child arcs will end
+		// with this node.
+
+		// For the purpose of determining whether at least one non-cyclic
+		// conjunct exists, we consider all conjuncts of a cyclic conjunct
+		// also cyclic.
+
+		cyclic = true
+		n.hasCycle = true
+
+		// As the EvaluatingArcs mechanism bypasses the self-reference
+		// mechanism, we need to separately keep track of it here.
+		// If this (originally) is a self-reference node, adding them
+		// will result in recursively adding the same reference. For this
+		// we also mark the node as evaluating.
+		if arc.SelfCount > 0 {
+			return
+		}
+
+		// This count is added for values that are directly added below.
+		// The count is handled separately for delayed values.
+		arc.SelfCount++
+		defer func() { arc.SelfCount-- }()
+	}
+
+	// Performance: the following if check filters cases that are not strictly
+	// necessary for correct functioning. Not updating the closeInfo may cause
+	// some position information to be lost for top-level positions of merges
+	// resulting from APIs. These tend to be fairly uninteresting.
+	// At the same time, this optimization may prevent considerable slowdown
+	// in case an API does many calls to Unify.
+	x := c.Expr()
+	if !inline || arc.IsClosedStruct() || arc.IsClosedList() {
+		closeInfo = closeInfo.SpawnRef(arc, IsDef(x), x)
+	}
+
+	if arc.status == 0 && !inline {
+		// This is a rare condition, but can happen in certain
+		// evaluation orders. Unfortunately, adding this breaks
+		// resolution of cyclic mutually referring disjunctions. But it
+		// is necessary to prevent lookups in unevaluated structs.
+		// TODO(cycles): this can probably most easily be fixed with a
+		// more recursive implementation.
+		n.ctx.Unify(arc, Partial)
+	}
+
+	for _, c := range arc.Conjuncts {
+		var a []*Vertex
+		if env != nil {
+			a = env.Deref
+		}
+		if inline {
+			c = updateCyclic(c, cyclic, nil, nil)
+		} else {
+			c = updateCyclic(c, cyclic, arc, a)
+		}
+
+		// Note that we are resetting the tree here. We hereby assume that
+		// closedness conflicts resulting from unifying the referenced arc were
+		// already caught there and that we can ignore further errors here.
+		c.CloseInfo = closeInfo
+		n.addExprConjunct(c)
+	}
+}
+
+// isDef reports whether an expression is a reference that references a
+// definition anywhere in its selection path.
+//
+// TODO(performance): this should be merged with resolve(). But for now keeping
+// this code isolated makes it easier to see what it is for.
+func isDef(x Expr) bool {
+	// Walk down the selection path iteratively instead of recursing.
+	for {
+		switch r := x.(type) {
+		case *FieldReference:
+			return r.Label.IsDef()
+
+		case *SelectorExpr:
+			if r.Sel.IsDef() {
+				return true
+			}
+			x = r.X
+
+		case *IndexExpr:
+			x = r.X
+
+		default:
+			return false
+		}
+	}
+}
+
+// updateCyclicStatus looks for proof of non-cyclic conjuncts to override
+// a structural cycle.
+func (n *nodeContext) updateCyclicStatus(env *Environment) {
+	// A conjunct arriving through a cyclic environment is no proof.
+	if env != nil && env.Cyclic {
+		return
+	}
+	n.hasNonCycle = true
+}
+
+// updateCyclic returns a Conjunct whose Environment reflects the given cycle
+// bookkeeping: the Cyclic flag and the dereferenced vertices recorded in
+// Deref and Cycles. The environment is copied on write so that the original
+// conjunct, which may be shared by other fields, is left untouched. If no
+// change is needed, c is returned as is.
+func updateCyclic(c Conjunct, cyclic bool, deref *Vertex, a []*Vertex) Conjunct {
+	env := c.Env
+	switch {
+	case env == nil:
+		if !cyclic && deref == nil {
+			return c
+		}
+		env = &Environment{Cyclic: cyclic}
+	case deref == nil && env.Cyclic == cyclic && len(a) == 0:
+		// Nothing to record: reuse the conjunct unchanged.
+		return c
+	default:
+		// The conjunct may still be in use in other fields, so we should
+		// make a new copy to mark Cyclic only for this case.
+		e := *env
+		e.Cyclic = e.Cyclic || cyclic
+		env = &e
+	}
+	if deref != nil || len(a) > 0 {
+		// Build a fresh Deref slice so the copy does not alias the original.
+		cp := make([]*Vertex, 0, len(a)+1)
+		cp = append(cp, a...)
+		if deref != nil {
+			cp = append(cp, deref)
+		}
+		env.Deref = cp
+	}
+	if deref != nil {
+		env.Cycles = append(env.Cycles, deref)
+	}
+	return MakeConjunct(env, c.Elem(), c.CloseInfo)
+}
+
+// addValueConjunct merges an evaluated Value into the node: data vertices are
+// expanded into fields, bounds and validators are simplified against those
+// already collected, and scalars are checked for conflicts.
+func (n *nodeContext) addValueConjunct(env *Environment, v Value, id CloseInfo) {
+	n.updateCyclicStatus(env)
+
+	ctx := n.ctx
+
+	if x, ok := v.(*Vertex); ok {
+		if m, ok := x.BaseValue.(*StructMarker); ok {
+			n.aStruct = x
+			n.aStructID = id
+			if m.NeedClose {
+				// Requested by the close builtin: spawn a closed scope.
+				id = id.SpawnRef(x, IsDef(x), x)
+				id.IsClosed = true
+			}
+		}
+
+		cyclic := env != nil && env.Cyclic
+
+		if !x.IsData() {
+			// TODO: this really shouldn't happen anymore.
+			if isComplexStruct(ctx, x) {
+				// This really shouldn't happen, but just in case.
+				n.addVertexConjuncts(MakeConjunct(env, x, id), x, true)
+				return
+			}
+
+			for _, c := range x.Conjuncts {
+				c = updateCyclic(c, cyclic, nil, nil)
+				c.CloseInfo = id
+				n.addExprConjunct(c) // TODO: Pass from eval
+			}
+			return
+		}
+
+		// TODO: evaluate value?
+		switch v := x.BaseValue.(type) {
+		default:
+			panic(fmt.Sprintf("invalid type %T", x.BaseValue))
+
+		case *ListMarker:
+			// Lists are merged later by addLists.
+			n.vLists = append(n.vLists, x)
+			return
+
+		case *StructMarker:
+
+		case Value:
+			n.addValueConjunct(env, v, id)
+		}
+
+		if len(x.Arcs) == 0 {
+			return
+		}
+
+		s := &StructLit{}
+
+		// Keep ordering of Go struct for topological sort.
+		n.node.AddStruct(s, env, id)
+		n.node.Structs = append(n.node.Structs, x.Structs...)
+
+		for _, a := range x.Arcs {
+			// TODO(errors): report error when this is a regular field.
+			c := MakeConjunct(nil, a, id)
+			c = updateCyclic(c, cyclic, nil, nil)
+			n.insertField(a.Label, c)
+			s.MarkField(a.Label)
+		}
+		return
+	}
+
+	switch b := v.(type) {
+	case *Bottom:
+		n.addBottom(b)
+		return
+	case *Builtin:
+		// A builtin that can act as a validator is treated as one.
+		if v := b.BareValidator(); v != nil {
+			n.addValueConjunct(env, v, id)
+			return
+		}
+	}
+
+	if !n.updateNodeType(v.Kind(), v, id) {
+		return
+	}
+
+	switch x := v.(type) {
+	case *Disjunction:
+		n.addDisjunctionValue(env, x, id)
+
+	case *Conjunction:
+		for _, x := range x.Values {
+			n.addValueConjunct(env, x, id)
+		}
+
+	case *Top:
+		n.hasTop = true
+
+	case *BasicType:
+		// handled above
+
+	case *BoundValue:
+		switch x.Op {
+		case LessThanOp, LessEqualOp:
+			// Merge with an existing upper bound, if any.
+			if y := n.upperBound; y != nil {
+				n.upperBound = nil
+				v := SimplifyBounds(ctx, n.kind, x, y)
+				if err := valueError(v); err != nil {
+					err.AddPosition(v)
+					err.AddPosition(n.upperBound)
+					err.AddClosedPositions(id)
+				}
+				n.addValueConjunct(env, v, id)
+				return
+			}
+			n.upperBound = x
+
+		case GreaterThanOp, GreaterEqualOp:
+			// Merge with an existing lower bound, if any.
+			if y := n.lowerBound; y != nil {
+				n.lowerBound = nil
+				v := SimplifyBounds(ctx, n.kind, x, y)
+				if err := valueError(v); err != nil {
+					err.AddPosition(v)
+					err.AddPosition(n.lowerBound)
+					err.AddClosedPositions(id)
+				}
+				n.addValueConjunct(env, v, id)
+				return
+			}
+			n.lowerBound = x
+
+		case EqualOp, NotEqualOp, MatchOp, NotMatchOp:
+			// This check serves as simplifier, but also to remove duplicates.
+			k := 0
+			match := false
+			for _, c := range n.checks {
+				if y, ok := c.(*BoundValue); ok {
+					switch z := SimplifyBounds(ctx, n.kind, x, y); {
+					case z == y:
+						match = true
+					case z == x:
+						continue
+					}
+				}
+				n.checks[k] = c
+				k++
+			}
+			n.checks = n.checks[:k]
+			if !match {
+				n.checks = append(n.checks, x)
+			}
+			return
+		}
+
+	case Validator:
+		// This check serves as simplifier, but also to remove duplicates.
+		for i, y := range n.checks {
+			if b := SimplifyValidator(ctx, x, y); b != nil {
+				n.checks[i] = b
+				return
+			}
+		}
+		n.updateNodeType(x.Kind(), x, id)
+		n.checks = append(n.checks, x)
+
+	case *Vertex:
+		// handled above.
+
+	case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin
+		if y := n.scalar; y != nil {
+			// A second scalar must be equal to the first or it is a conflict.
+			if b, ok := BinOp(ctx, EqualOp, x, y).(*Bool); !ok || !b.B {
+				n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id)
+			}
+			// TODO: do we need to explicitly add again?
+			// n.scalar = nil
+			// n.addValueConjunct(c, BinOp(c, EqualOp, x, y))
+			break
+		}
+		n.scalar = x
+		n.scalarID = id
+
+	default:
+		panic(fmt.Sprintf("unknown value type %T", x))
+	}
+
+	// If both bounds are now present, try to collapse them into a single
+	// value (e.g. >=1 & <=1 becomes 1).
+	if n.lowerBound != nil && n.upperBound != nil {
+		if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil {
+			if err := valueError(u); err != nil {
+				err.AddPosition(n.lowerBound)
+				err.AddPosition(n.upperBound)
+				err.AddClosedPositions(id)
+			}
+			n.lowerBound = nil
+			n.upperBound = nil
+			n.addValueConjunct(env, u, id)
+		}
+	}
+}
+
+// valueError returns the ValueError carried by v if v is a Bottom whose Err
+// is a *ValueError, and nil in every other case.
+func valueError(v Value) *ValueError {
+	b, ok := v.(*Bottom)
+	if !ok || b == nil {
+		return nil
+	}
+	if err, ok := b.Err.(*ValueError); ok {
+		return err
+	}
+	return nil
+}
+
+// addStruct collates the declarations of a struct.
+//
+// addStruct fulfills two additional pivotal functions:
+// 1) Implement vertex unification (this happens through De Bruijn indices
+//    combined with proper set up of Environments).
+// 2) Implied closedness for definitions.
+//
+func (n *nodeContext) addStruct(
+	env *Environment,
+	s *StructLit,
+	closeInfo CloseInfo) {
+
+	n.updateCyclicStatus(env) // to handle empty structs.
+
+	// NOTE: This is a crucial point in the code:
+	// Unification dereferencing happens here. The child nodes are set to
+	// an Environment linked to the current node. Together with the De Bruijn
+	// indices, this determines to which Vertex a reference resolves.
+
+	// TODO(perf): consider using environment cache:
+	// var childEnv *Environment
+	// for _, s := range n.nodeCache.sub {
+	// 	if s.Up == env {
+	// 		childEnv = s
+	// 	}
+	// }
+	childEnv := &Environment{
+		Up:     env,
+		Vertex: n.node,
+	}
+	if env != nil {
+		childEnv.Cyclic = env.Cyclic
+		childEnv.Deref = env.Deref
+	}
+
+	s.Init()
+
+	if s.HasEmbed && !s.IsFile() {
+		closeInfo = closeInfo.SpawnGroup(nil)
+	}
+
+	parent := n.node.AddStruct(s, childEnv, closeInfo)
+	closeInfo.IsClosed = false
+	parent.Disable = true // disable until processing is done.
+
+	// First pass: collect dynamic fields, comprehensions, and embeddings.
+	for _, d := range s.Decls {
+		switch x := d.(type) {
+		case *Field:
+			// handle in next iteration.
+
+		case *DynamicField:
+			n.aStruct = s
+			n.aStructID = closeInfo
+			n.dynamicFields = append(n.dynamicFields, envDynamic{childEnv, x, closeInfo, nil})
+
+		case *Comprehension:
+			n.insertComprehension(childEnv, x, closeInfo)
+
+		case Expr:
+			// add embedding to optional
+
+			// TODO(perf): only do this if addExprConjunct below will result in
+			// a fieldSet. Otherwise the entry will just be removed next.
+			id := closeInfo.SpawnEmbed(x)
+
+			// push and pop embedding type.
+			n.addExprConjunct(MakeConjunct(childEnv, x, id))
+
+		case *OptionalField, *BulkOptionalField, *Ellipsis:
+			// Nothing to do here. Note that the presence of these fields do not
+			// exclude embedded scalars: only when they match actual fields
+			// does it exclude those.
+
+		default:
+			panic("unreachable")
+		}
+	}
+
+	if !s.HasEmbed {
+		n.aStruct = s
+		n.aStructID = closeInfo
+	}
+
+	parent.Disable = false
+
+	// Second pass: insert the regular fields.
+	for _, d := range s.Decls {
+		switch x := d.(type) {
+		case *Field:
+			if x.Label.IsString() {
+				n.aStruct = s
+				n.aStructID = closeInfo
+			}
+			n.insertField(x.Label, MakeConjunct(childEnv, x, closeInfo))
+		}
+	}
+}
+
+// TODO(perf): if an arc is the only arc with that label added to a Vertex, and
+// if there are no conjuncts of optional fields to be added, then the arc could
+// be added as is until any of these conditions change. This would allow
+// structure sharing in many cases. One should be careful, however, to
+// recursively track arcs of previously unified evaluated vertices to make this
+// optimization meaningful.
+//
+// An alternative approach to avoid evaluating optional arcs (if we take that
+// route) is to not recursively evaluate those arcs, even for Finalize. This is
+// possible as it is not necessary to evaluate optional arcs to evaluate
+// disjunctions.
+
+// insertField adds conjunct x to the arc of n.node labeled f, creating the
+// arc if needed, and propagates the conjunct into an already-active arc
+// state. Adding to an arc that is already finalized is an error.
+func (n *nodeContext) insertField(f Feature, x Conjunct) *Vertex {
+	ctx := n.ctx
+	arc, _ := n.node.GetArc(ctx, f)
+
+	arc.addConjunct(x)
+
+	switch {
+	case arc.state != nil:
+		s := arc.state
+		switch {
+		case arc.Status() <= AllArcs:
+			// This may happen when a struct has multiple comprehensions, where
+			// the insertion of one of which depends on the outcome of another.
+
+			// TODO: do something more principled by allowing values to
+			// monotonically increase.
+			arc.status = Partial
+			arc.BaseValue = nil
+			s.disjuncts = s.disjuncts[:0]
+			s.disjunctErrs = s.disjunctErrs[:0]
+
+			fallthrough
+
+		default:
+			arc.state.addExprConjunct(x)
+		}
+
+	case arc.Status() == 0:
+	default:
+		// The arc was already finalized: too late to add fields.
+		n.addBottom(&Bottom{
+			Code: IncompleteError,
+			Err: ctx.NewPosf(pos(x.Field()),
+				"cannot add field %s: was already used",
+				f.SelectorString(ctx)),
+		})
+	}
+	return arc
+}
+
+// expandOne adds dynamic fields to a node until a fixed point is reached.
+// On each iteration, dynamic fields that cannot resolve due to incomplete
+// values are skipped. They will be retried on the next iteration until no
+// progress can be made. Note that a dynamic field may add more dynamic fields.
+//
+// forClauses are processed after all other clauses. A struct may be referenced
+// before it is complete, meaning that fields added by other forms of injection
+// may influence the result of a for clause _after_ it has already been
+// processed. We could instead detect such insertion and feed it to the
+// ForClause to generate another entry or have the for clause be recomputed.
+// This seems to be too complicated and lead to iffy edge cases.
+// TODO(errors): detect when a field is added to a struct that is already used
+// in a for clause.
+func (n *nodeContext) expandOne() (done bool) {
+	// Don't expand incomplete expressions if we detected a cycle.
+	if n.done() || (n.hasCycle && !n.hasNonCycle) {
+		return false
+	}
+
+	var progress bool
+
+	if progress = n.injectDynamic(); progress {
+		return true
+	}
+
+	if progress = n.injectComprehensions(&(n.comprehensions)); progress {
+		return true
+	}
+
+	// Do expressions after comprehensions, as comprehensions can never
+	// refer to embedded scalars, whereas expressions may refer to generated
+	// fields if we were to allow attributes to be defined alongside
+	// scalars.
+	exprs := n.exprs
+	n.exprs = n.exprs[:0]
+	for _, x := range exprs {
+		n.addExprConjunct(x.c)
+
+		// collect and and or
+	}
+	// Progress was made if at least one queued expression resolved
+	// (i.e. was not re-queued).
+	if len(n.exprs) < len(exprs) {
+		return true
+	}
+
+	// No progress, report error later if needed: unification with
+	// disjuncts may resolve this later on.
+	return false
+}
+
+// injectDynamic evaluates and inserts dynamic declarations. Fields whose key
+// is still incomplete are kept in n.dynamicFields to be retried; it reports
+// whether any field was successfully inserted.
+func (n *nodeContext) injectDynamic() (progress bool) {
+	ctx := n.ctx
+	k := 0
+
+	// Compact the still-unresolved entries in place into a[:k].
+	a := n.dynamicFields
+	for _, d := range n.dynamicFields {
+		var f Feature
+		v, complete := ctx.Evaluate(d.env, d.field.Key)
+		if !complete {
+			d.err, _ = v.(*Bottom)
+			a[k] = d
+			k++
+			continue
+		}
+		if b, _ := v.(*Bottom); b != nil {
+			n.addValueConjunct(nil, b, d.id)
+			continue
+		}
+		f = ctx.Label(d.field.Key, v)
+		if f.IsInt() {
+			n.addErr(ctx.NewPosf(pos(d.field.Key), "integer fields not supported"))
+		}
+		n.insertField(f, MakeConjunct(d.env, d.field, d.id))
+	}
+
+	progress = k < len(n.dynamicFields)
+
+	n.dynamicFields = a[:k]
+
+	return progress
+}
+
+// addLists merges all queued list conjuncts (n.lists and n.vLists) into the
+// node, enforcing length compatibility and closedness, and returns one of the
+// list expressions with its CloseInfo for error attribution.
+//
+// TODO: association arrays:
+// If an association array marker was present in a struct, create a struct node
+// instead of a list node. In either case, a node may only have list fields
+// or struct fields and not both.
+//
+// addLists should be run after the fixpoint expansion:
+//   - it enforces that comprehensions may not refer to the list itself
+//   - there may be no other fields within the list.
+//
+// TODO(embeddedScalars): for embedded scalars, there should be another pass
+// of evaluation expressions after expanding lists.
+func (n *nodeContext) addLists() (oneOfTheLists Expr, anID CloseInfo) {
+	if len(n.lists) == 0 && len(n.vLists) == 0 {
+		return nil, CloseInfo{}
+	}
+
+	// isOpen/max/maxNode track the widest list seen so far and whether the
+	// combined list may still grow.
+	isOpen := true
+	max := 0
+	var maxNode Expr
+
+	if m, ok := n.node.BaseValue.(*ListMarker); ok {
+		isOpen = m.IsOpen
+		max = len(n.node.Arcs)
+	}
+
+	c := n.ctx
+
+	// First merge the already-evaluated lists.
+	for _, l := range n.vLists {
+		oneOfTheLists = l
+
+		elems := l.Elems()
+		isClosed := l.IsClosedList()
+
+		switch {
+		case len(elems) < max:
+			if isClosed {
+				n.invalidListLength(len(elems), max, l, maxNode)
+				continue
+			}
+
+		case len(elems) > max:
+			if !isOpen {
+				n.invalidListLength(max, len(elems), maxNode, l)
+				continue
+			}
+			isOpen = !isClosed
+			max = len(elems)
+			maxNode = l
+
+		case isClosed:
+			isOpen = false
+			maxNode = l
+		}
+
+		for _, a := range elems {
+			if a.Conjuncts == nil {
+				x := a.BaseValue.(Value)
+				n.insertField(a.Label, MakeConjunct(nil, x, CloseInfo{}))
+				continue
+			}
+			for _, c := range a.Conjuncts {
+				n.insertField(a.Label, c)
+			}
+		}
+	}
+
+	// Then process the unevaluated list literals.
+outer:
+	for i, l := range n.lists {
+		n.updateCyclicStatus(l.env.Up)
+
+		index := int64(0)
+		hasComprehension := false
+		for j, elem := range l.list.Elems {
+			switch x := elem.(type) {
+			case *Comprehension:
+				// Expand the comprehension into indexed fields.
+				err := c.Yield(l.env, x, func(e *Environment) {
+					label, err := MakeLabel(x.Source(), index, IntLabel)
+					n.addErr(err)
+					index++
+					c := MakeConjunct(e, x.Value, l.id)
+					n.insertField(label, c)
+				})
+				hasComprehension = true
+				if err != nil {
+					n.addBottom(err)
+					continue outer
+				}
+
+			case *Ellipsis:
+				if j != len(l.list.Elems)-1 {
+					n.addErr(c.Newf("ellipsis must be last element in list"))
+				}
+
+				n.lists[i].elipsis = x
+
+			default:
+				label, err := MakeLabel(x.Source(), index, IntLabel)
+				n.addErr(err)
+				index++ // TODO: don't use insertField.
+				n.insertField(label, MakeConjunct(l.env, x, l.id))
+			}
+
+			// Terminate early in case of runaway comprehension.
+			if !isOpen && int(index) > max {
+				n.invalidListLength(max, len(l.list.Elems), maxNode, l.list)
+				continue outer
+			}
+		}
+
+		oneOfTheLists = l.list
+		anID = l.id
+
+		switch closed := n.lists[i].elipsis == nil; {
+		case int(index) < max:
+			if closed {
+				n.invalidListLength(int(index), max, l.list, maxNode)
+				continue
+			}
+
+		case int(index) > max,
+			closed && isOpen,
+			(!closed == isOpen) && !hasComprehension:
+			max = int(index)
+			maxNode = l.list
+			isOpen = !closed
+		}
+
+		n.lists[i].n = index
+	}
+
+	// add additionalItem values to list and construct optionals.
+	elems := n.node.Elems()
+	for _, l := range n.vLists {
+		if !l.IsClosedList() {
+			continue
+		}
+
+		newElems := l.Elems()
+		if len(newElems) >= len(elems) {
+			continue // error generated earlier, if applicable.
+		}
+
+		for _, arc := range elems[len(newElems):] {
+			l.MatchAndInsert(c, arc)
+		}
+	}
+
+	for _, l := range n.lists {
+		if l.elipsis == nil {
+			continue
+		}
+
+		s := l.list.info
+		if s == nil {
+			s = &StructLit{Decls: []Decl{l.elipsis}}
+			s.Init()
+			l.list.info = s
+		}
+		info := n.node.AddStruct(s, l.env, l.id)
+
+		// Apply the ellipsis type to elements beyond this list's length.
+		for _, arc := range elems[l.n:] {
+			info.MatchAndInsert(c, arc)
+		}
+	}
+
+	sources := []ast.Expr{}
+	// Add conjuncts for additional items.
+	for _, l := range n.lists {
+		if l.elipsis == nil {
+			continue
+		}
+		if src, _ := l.elipsis.Source().(ast.Expr); src != nil {
+			sources = append(sources, src)
+		}
+	}
+
+	// Record (or merge) the resulting list marker on the node.
+	if m, ok := n.node.BaseValue.(*ListMarker); !ok {
+		n.node.SetValue(c, Partial, &ListMarker{
+			Src:    ast.NewBinExpr(token.AND, sources...),
+			IsOpen: isOpen,
+		})
+	} else {
+		if expr, _ := m.Src.(ast.Expr); expr != nil {
+			sources = append(sources, expr)
+		}
+		m.Src = ast.NewBinExpr(token.AND, sources...)
+		m.IsOpen = m.IsOpen && isOpen
+	}
+
+	n.lists = n.lists[:0]
+	n.vLists = n.vLists[:0]
+
+	return oneOfTheLists, anID
+}
+
+// invalidListLength records an incompatible-list-lengths error on the node.
+// The expression arguments are currently unused but are kept so callers can
+// supply positions for future error reporting.
+func (n *nodeContext) invalidListLength(na, nb int, a, b Expr) {
+	n.addErr(n.ctx.Newf("incompatible list lengths (%d and %d)", na, nb))
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/expr.go b/vendor/cuelang.org/go/internal/core/adt/expr.go
new file mode 100644
index 0000000000..31aeda234f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/expr.go
@@ -0,0 +1,1759 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+
+ "github.com/cockroachdb/apd/v2"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// A StructLit represents an unevaluated struct literal or file body.
+type StructLit struct {
+	Src   ast.Node // ast.File or ast.StructLit
+	Decls []Decl
+
+	// TODO: record the merge order somewhere.
+
+	// The below fields are redundant to Decls and are computed with Init.
+
+	// Fields marks the optional conjuncts of all explicit Fields.
+	// Required Fields are marked as empty.
+	Fields []FieldInfo
+
+	Dynamic []*DynamicField
+
+	// Bulk contains the bulk optional fields; excluded are all literal
+	// fields that already exist.
+	Bulk []*BulkOptionalField
+
+	Additional  []*Ellipsis
+	HasEmbed    bool
+	IsOpen      bool // has a ...
+	initialized bool
+
+	types OptionalType
+
+	// administrative fields like hasreferences.
+	// hasReferences bool
+}
+
+// IsFile reports whether this struct represents a file body.
+func (o *StructLit) IsFile() bool {
+	_, ok := o.Src.(*ast.File)
+	return ok
+}
+
+// FieldInfo records, per label, the optional conjuncts that apply to it.
+type FieldInfo struct {
+	Label    Feature
+	Optional []Node
+}
+
+// HasOptional reports whether the struct has any optional, pattern, or
+// additional fields.
+func (x *StructLit) HasOptional() bool {
+	return x.types&(HasField|HasPattern|HasAdditional) != 0
+}
+
+func (x *StructLit) Source() ast.Node { return x.Src }
+
+// evaluate evaluates the struct literal into a new Vertex.
+func (x *StructLit) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{
+		Parent:    e.Vertex,
+		Conjuncts: []Conjunct{{e, x, CloseInfo{}}},
+	}
+	// evaluate may not finalize a field, as the resulting value may be
+	// used in a context where more conjuncts are added. It may also lead
+	// to disjuncts being in a partially expanded state, leading to
+	// misaligned nodeContexts.
+	c.Unify(v, AllArcs)
+	return v
+}
+
+// MarkField records that label f exists as an explicit field.
+// TODO: remove this method
+func (o *StructLit) MarkField(f Feature) {
+	o.Fields = append(o.Fields, FieldInfo{Label: f})
+}
+
+// Init computes the redundant summary fields (Fields, Dynamic, Bulk,
+// Additional, HasEmbed, IsOpen, types) from Decls. It is idempotent.
+func (o *StructLit) Init() {
+	if o.initialized {
+		return
+	}
+	o.initialized = true
+	for _, d := range o.Decls {
+		switch x := d.(type) {
+		case *Field:
+			if o.fieldIndex(x.Label) < 0 {
+				o.Fields = append(o.Fields, FieldInfo{Label: x.Label})
+			}
+
+		case *OptionalField:
+			// Attach the optional conjunct to the (possibly new) label entry.
+			p := o.fieldIndex(x.Label)
+			if p < 0 {
+				p = len(o.Fields)
+				o.Fields = append(o.Fields, FieldInfo{Label: x.Label})
+			}
+			o.Fields[p].Optional = append(o.Fields[p].Optional, x)
+			o.types |= HasField
+
+		case *DynamicField:
+			o.Dynamic = append(o.Dynamic, x)
+			o.types |= HasDynamic
+
+		case Expr:
+			o.HasEmbed = true
+
+		case *Comprehension:
+			o.HasEmbed = true
+
+		case *LetClause:
+			o.HasEmbed = true
+
+		case *BulkOptionalField:
+			o.Bulk = append(o.Bulk, x)
+			o.types |= HasPattern
+			switch x.Filter.(type) {
+			case *BasicType, *Top:
+			default:
+				o.types |= HasComplexPattern
+			}
+
+		case *Ellipsis:
+			switch x.Value.(type) {
+			case nil, *Top:
+				// A bare or top-typed ellipsis leaves the struct open.
+				o.IsOpen = true
+				o.types |= IsOpen
+
+			default:
+				// TODO: consider only adding for non-top.
+				o.types |= HasAdditional
+			}
+			o.Additional = append(o.Additional, x)
+
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+// fieldIndex returns the position of the FieldInfo with label f in o.Fields,
+// or -1 if no such entry has been recorded.
+func (o *StructLit) fieldIndex(f Feature) int {
+	for i, fi := range o.Fields {
+		if fi.Label == f {
+			return i
+		}
+	}
+	return -1
+}
+
+// OptionalTypes returns a bit field with the kinds of optional constraints
+// present in this struct (computed by Init).
+func (o *StructLit) OptionalTypes() OptionalType {
+	return o.types
+}
+
+// IsOptional reports whether the given label has at least one optional
+// conjunct in this struct.
+func (o *StructLit) IsOptional(label Feature) bool {
+	for _, f := range o.Fields {
+		if f.Label == label && len(f.Optional) > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// FIELDS
+//
+// Fields can also be used as expressions whereby the value field is the
+// expression; this allows retaining more context.
+
+// Field represents a field with a fixed label. It can be a regular field,
+// definition or hidden field.
+//
+//	foo: bar
+//	#foo: bar
+//	_foo: bar
+//
+// Legacy:
+//
+//	Foo :: bar
+//
+type Field struct {
+	Src *ast.Field
+
+	Label Feature
+	Value Expr
+}
+
+func (x *Field) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// An OptionalField represents an optional regular field.
+//
+//	foo?: expr
+//
+type OptionalField struct {
+	Src   *ast.Field
+	Label Feature
+	Value Expr
+}
+
+func (x *OptionalField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// A BulkOptionalField represents a set of optional fields.
+//
+//	[expr]: expr
+//
+type BulkOptionalField struct {
+	Src    *ast.Field // Ellipsis or Field
+	Filter Expr
+	Value  Expr
+	Label  Feature // for reference and formatting
+}
+
+func (x *BulkOptionalField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// An Ellipsis represents a set of optional fields of a given type.
+//
+//	...T
+//
+type Ellipsis struct {
+	Src   *ast.Ellipsis
+	Value Expr
+}
+
+func (x *Ellipsis) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// A DynamicField represents a regular field for which the key is computed.
+//
+//	"\(expr)": expr
+//	(expr): expr
+//
+type DynamicField struct {
+	Src   *ast.Field
+	Key   Expr
+	Value Expr
+}
+
+// IsOptional reports whether the dynamic field was marked optional in the
+// source.
+func (x *DynamicField) IsOptional() bool {
+	return x.Src.Optional != token.NoPos
+}
+
+func (x *DynamicField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// A ListLit represents an unevaluated list literal.
+//
+//	[a, for x in src { ... }, b, ...T]
+//
+type ListLit struct {
+	Src *ast.ListLit
+
+	// scalars, comprehensions, ...T
+	Elems []Elem
+
+	info *StructLit // Shared closedness info.
+}
+
+func (x *ListLit) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate evaluates the list literal into a new Vertex.
+func (x *ListLit) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{
+		Parent:    e.Vertex,
+		Conjuncts: []Conjunct{{e, x, CloseInfo{}}},
+	}
+	// TODO: should be AllArcs and then use Finalize for builtins?
+	c.Unify(v, Finalized) // TODO: also partial okay?
+	return v
+}
+
+// Null represents null. It can be used as a Value and Expr.
+type Null struct {
+ Src ast.Node
+}
+
+func (x *Null) Source() ast.Node { return x.Src }
+func (x *Null) Kind() Kind { return NullKind }
+
+// Bool is a boolean value. It can be used as a Value and Expr.
+type Bool struct {
+ Src ast.Node
+ B bool
+}
+
+func (x *Bool) Source() ast.Node { return x.Src }
+func (x *Bool) Kind() Kind { return BoolKind }
+
+// Num is a numeric value. It can be used as a Value and Expr.
+type Num struct {
+ Src ast.Node
+ K Kind // needed?
+ X apd.Decimal // Is integer if the apd.Decimal is an integer.
+}
+
+// TODO: do we need this?
+// func NewNumFromString(src ast.Node, s string) Value {
+// n := &Num{Src: src, K: IntKind}
+// if strings.ContainsAny(s, "eE.") {
+// n.K = FloatKind
+// }
+// _, _, err := n.X.SetString(s)
+// if err != nil {
+// pos := token.NoPos
+// if src != nil {
+// pos = src.Pos()
+// }
+// return &Bottom{Err: errors.Newf(pos, "invalid number: %v", err)}
+// }
+// return n
+// }
+
+func (x *Num) Source() ast.Node { return x.Src }
+func (x *Num) Kind() Kind { return x.K }
+
+// TODO: do we still need this?
+// func (x *Num) Specialize(k Kind) Value {
+// k = k & x.K
+// if k == x.K {
+// return x
+// }
+// y := *x
+// y.K = k
+// return &y
+// }
+
+// String is a string value. It can be used as a Value and Expr.
+type String struct {
+ Src ast.Node
+ Str string
+ RE *regexp.Regexp // only set if needed
+}
+
+func (x *String) Source() ast.Node { return x.Src }
+func (x *String) Kind() Kind { return StringKind }
+
+// Bytes is a bytes value. It can be used as a Value and Expr.
+type Bytes struct {
+ Src ast.Node
+ B []byte
+ RE *regexp.Regexp // only set if needed
+}
+
+func (x *Bytes) Source() ast.Node { return x.Src }
+func (x *Bytes) Kind() Kind { return BytesKind }
+
+// Composites: the evaluated fields of a composite are recorded in the arc
+// vertices.
+
+type ListMarker struct {
+ Src ast.Node
+ IsOpen bool
+}
+
+func (x *ListMarker) Source() ast.Node { return x.Src }
+func (x *ListMarker) Kind() Kind { return ListKind }
+func (x *ListMarker) node() {}
+
+// A StructMarker marks a Vertex as a struct.
+type StructMarker struct {
+	// NeedClose is used to signal that the evaluator should close this struct.
+	// It is only set by the close builtin.
+	NeedClose bool
+}
+
+func (x *StructMarker) Source() ast.Node { return nil }
+func (x *StructMarker) Kind() Kind       { return StructKind }
+func (x *StructMarker) node()            {}
+
+// Top represents all possible values. It can be used as a Value and Expr.
+type Top struct{ Src *ast.Ident }
+
+func (x *Top) Source() ast.Node {
+	// Return an untyped nil: a nil *ast.Ident wrapped in ast.Node would
+	// compare non-nil to callers.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+func (x *Top) Kind() Kind { return TopKind }
+
+// BasicType represents all values of a certain Kind. It can be used as a Value
+// and Expr.
+//
+//	string
+//	int
+//	num
+//	bool
+//
+type BasicType struct {
+	Src ast.Node
+	K   Kind // the kind mask this type admits
+}
+
+func (x *BasicType) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+func (x *BasicType) Kind() Kind { return x.K }
+
+// TODO: do we still need this?
+// func (x *BasicType) Specialize(k Kind) Value {
+// k = x.K & k
+// if k == x.K {
+// return x
+// }
+// y := *x
+// y.K = k
+// return &y
+// }
+
+// TODO: should we use UnaryExpr for Bound now we have BoundValue?
+
+// BoundExpr represents an unresolved unary comparator.
+//
+// Concrete {
+ return ctx.NewErrf("bound has fixed non-concrete value")
+ }
+ return &BoundValue{x.Src, x.Op, v}
+ }
+
+ // This simplifies boundary expressions. It is an alternative to an
+ // evaluation strategy that makes nodes increasingly more specific.
+ //
+ // For instance, a completely different implementation would be to allow
+ // the precense of a concrete value to ignore incomplete errors.
+ //
+ // TODO: consider an alternative approach.
+ switch y := v.(type) {
+ case *BoundValue:
+ switch {
+ case y.Op == NotEqualOp:
+ switch x.Op {
+ case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp:
+ // <(!=3) => number
+ // Smaller than an arbitrarily large number is any number.
+ return &BasicType{K: y.Kind()}
+ case NotEqualOp:
+ // !=(!=3) ==> 3
+ // Not a value that is anything but a given value is that
+ // given value.
+ return y.Value
+ }
+
+ case x.Op == NotEqualOp:
+ // Invert if applicable.
+ switch y.Op {
+ case LessEqualOp:
+ return &BoundValue{x.Src, GreaterThanOp, y.Value}
+ case LessThanOp:
+ return &BoundValue{x.Src, GreaterEqualOp, y.Value}
+ case GreaterEqualOp:
+ return &BoundValue{x.Src, LessThanOp, y.Value}
+ case GreaterThanOp:
+ return &BoundValue{x.Src, LessEqualOp, y.Value}
+ }
+
+ case (x.Op == LessThanOp || x.Op == LessEqualOp) &&
+ (y.Op == GreaterThanOp || y.Op == GreaterEqualOp),
+ (x.Op == GreaterThanOp || x.Op == GreaterEqualOp) &&
+ (y.Op == LessThanOp || y.Op == LessEqualOp):
+ // <(>=3)
+ // Something smaller than an arbitrarily large number is any number.
+ return &BasicType{K: y.Kind()}
+
+ case x.Op == LessThanOp &&
+ (y.Op == LessEqualOp || y.Op == LessThanOp),
+ x.Op == GreaterThanOp &&
+ (y.Op == GreaterEqualOp || y.Op == GreaterThanOp):
+ // <(<=x) => <=x
+ // Less or equal than something that is less than x is less than x.
+ return y
+ }
+
+ case *BasicType:
+ switch x.Op {
+ case LessEqualOp, LessThanOp, GreaterEqualOp, GreaterThanOp:
+ return y
+ }
+ }
+ if v.Concreteness() > Concrete {
+ // TODO(errors): analyze dependencies of x.Expr to get positions.
+ ctx.addErrf(IncompleteError, token.NoPos, // TODO(errors): use ctx.pos()?
+ "non-concrete value %s for bound %s", x.Expr, x.Op)
+ return nil
+ }
+ return &BoundValue{x.Src, x.Op, v}
+}
+
+// A BoundValue is a fully evaluated unary comparator that can be used to
+// validate other values.
+//
+//	<5
+//	=~"Name$"
+//
+type BoundValue struct {
+	Src   ast.Expr
+	Op    Op    // the comparison operator (e.g. LessThanOp, MatchOp)
+	Value Value // the concrete bound operand
+}
+
+func (x *BoundValue) Source() ast.Node { return x.Src }
+
+// Kind widens the operand's kind where the bound admits more values:
+// numeric bounds admit any number, and !=null admits everything but null.
+func (x *BoundValue) Kind() Kind {
+	k := x.Value.Kind()
+	switch k {
+	case IntKind, FloatKind, NumKind:
+		return NumKind
+
+	case NullKind:
+		if x.Op == NotEqualOp {
+			return TopKind &^ NullKind
+		}
+	}
+	return k
+}
+
+// validate checks y against the bound by evaluating `y <op> x.Value` and
+// returns a Bottom describing the violation, or nil if y satisfies the bound.
+func (x *BoundValue) validate(c *OpContext, y Value) *Bottom {
+	a := y // Can be list or struct.
+	b := c.scalar(x.Value)
+	if c.HasErr() {
+		return c.Err()
+	}
+
+	switch v := BinOp(c, x.Op, a, b).(type) {
+	case *Bottom:
+		return v
+
+	case *Bool:
+		if v.B {
+			return nil
+		}
+		// TODO(errors): use "invalid value %v (not an %s)" if x is a
+		// predeclared identifier such as `int`.
+		err := c.Newf("invalid value %v (out of bound %s)", y, x)
+		err.AddPosition(y)
+		return &Bottom{Src: c.src, Err: err, Code: EvalError}
+
+	default:
+		// BinOp on a comparison is expected to yield Bool or Bottom only.
+		panic(fmt.Sprintf("unsupported type %T", v))
+	}
+}
+
+// validateStr reports whether the Go string a satisfies this bound. It is a
+// fast path for string bounds; any other operand falls back to validate.
+func (x *BoundValue) validateStr(c *OpContext, a string) bool {
+	if str, ok := x.Value.(*String); ok {
+		b := str.Str
+		switch x.Op {
+		case LessEqualOp:
+			return a <= b
+		case LessThanOp:
+			return a < b
+		case GreaterEqualOp:
+			return a >= b
+		case GreaterThanOp:
+			return a > b
+		case EqualOp:
+			return a == b
+		case NotEqualOp:
+			return a != b
+		case MatchOp:
+			return c.regexp(x.Value).MatchString(a)
+		case NotMatchOp:
+			return !c.regexp(x.Value).MatchString(a)
+		}
+	}
+	// Slow path: wrap a in a String value and run the generic check.
+	return x.validate(c, &String{Str: a}) == nil
+}
+
+// validateInt reports whether the int64 a satisfies this bound. It is a fast
+// path for numeric bounds that fit in an int64; otherwise (non-Num bound or
+// out-of-range decimal) it falls back to the generic validate.
+func (x *BoundValue) validateInt(c *OpContext, a int64) bool {
+	switch n := x.Value.(type) {
+	case *Num:
+		b, err := n.X.Int64()
+		if err != nil {
+			break // bound does not fit in int64; use slow path below
+		}
+		switch x.Op {
+		case LessEqualOp:
+			return a <= b
+		case LessThanOp:
+			return a < b
+		case GreaterEqualOp:
+			return a >= b
+		case GreaterThanOp:
+			return a > b
+		case EqualOp:
+			return a == b
+		case NotEqualOp:
+			return a != b
+		}
+	}
+	return x.validate(c, c.NewInt64(a)) == nil
+}
+
+// A NodeLink is used during computation to refer to an existing Vertex.
+// It is used to signal a potential cycle or reference.
+// Note that a NodeLink may be used as a value. This should be taken into
+// account.
+type NodeLink struct {
+	Node *Vertex
+}
+
+// Kind delegates to the linked Vertex.
+func (x *NodeLink) Kind() Kind {
+	return x.Node.Kind()
+}
+func (x *NodeLink) Source() ast.Node { return x.Node.Source() }
+
+// resolve trivially yields the linked node; state is ignored.
+func (x *NodeLink) resolve(c *OpContext, state VertexStatus) *Vertex {
+	return x.Node
+}
+
+// A FieldReference represents a lexical reference to a field.
+//
+//	a
+//
+type FieldReference struct {
+	Src     *ast.Ident
+	UpCount int32   // number of environments to pop to reach the target scope
+	Label   Feature // the referenced field's label
+}
+
+func (x *FieldReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve looks up Label in the node UpCount scopes up from the current one.
+func (x *FieldReference) resolve(c *OpContext, state VertexStatus) *Vertex {
+	n := c.relNode(x.UpCount)
+	pos := pos(x)
+	return c.lookup(n, pos, x.Label, state)
+}
+
+// A ValueReference represents a lexical reference to a value.
+//
+//	a: X=b
+//
+type ValueReference struct {
+	Src     *ast.Ident
+	UpCount int32
+	Label   Feature // for informative purposes
+}
+
+func (x *ValueReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve returns the vertex UpCount-1 scopes up, or the current vertex when
+// UpCount is zero.
+func (x *ValueReference) resolve(c *OpContext, state VertexStatus) *Vertex {
+	if x.UpCount == 0 {
+		return c.vertex
+	}
+	n := c.relNode(x.UpCount - 1)
+	return n
+}
+
+// A LabelReference refers to the string or integer value of a label.
+//
+//	[X=Pattern]: b: X
+//
+type LabelReference struct {
+	Src     *ast.Ident
+	UpCount int32
+}
+
+// TODO: should this implement resolver at all?
+
+func (x *LabelReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate converts the label UpCount scopes up into its value form.
+func (x *LabelReference) evaluate(ctx *OpContext) Value {
+	label := ctx.relLabel(x.UpCount)
+	if label == 0 {
+		// There is no label. This may happen if a LabelReference is evaluated
+		// outside of the context of a parent node, for instance if an
+		// "additional" items or properties is evaluated in isolation.
+		//
+		// TODO: this should return the pattern of the label.
+		return &BasicType{K: StringKind}
+	}
+	return label.ToValue(ctx)
+}
+
+// A DynamicReference is like a LabelReference, but with a computed label.
+//
+//	X=(x): X
+//	X="\(x)": X
+//
+type DynamicReference struct {
+	Src     *ast.Ident
+	UpCount int32
+	Label   Expr // expression computing the label
+
+	// TODO: only use aliases and store the actual expression only in the scope.
+	// The feature is unique for every instance. This will also allow dynamic
+	// fields to be ordered among normal fields.
+	//
+	// This could also be used to assign labels to embedded values, if they
+	// don't match a label.
+	Alias Feature
+}
+
+func (x *DynamicReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve evaluates the label expression in the environment UpCount levels up
+// and then looks up the resulting feature in that environment's vertex.
+func (x *DynamicReference) resolve(ctx *OpContext, state VertexStatus) *Vertex {
+	e := ctx.Env(x.UpCount)
+	frame := ctx.PushState(e, x.Src)
+	v := ctx.value(x.Label)
+	ctx.PopState(frame)
+	f := ctx.Label(x.Label, v)
+	return ctx.lookup(e.Vertex, pos(x), f, state)
+}
+
+// An ImportReference refers to an imported package.
+//
+//	import "strings"
+//
+//	strings.ToLower("Upper")
+//
+type ImportReference struct {
+	Src        *ast.Ident
+	ImportPath Feature
+	Label      Feature // for informative purposes
+}
+
+func (x *ImportReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve loads the imported package from the runtime; it records an error
+// (and returns nil) when the package cannot be found.
+func (x *ImportReference) resolve(ctx *OpContext, state VertexStatus) *Vertex {
+	path := x.ImportPath.StringValue(ctx)
+	v := ctx.Runtime.LoadImport(path)
+	if v == nil {
+		ctx.addErrf(EvalError, x.Src.Pos(), "cannot find package %q", path)
+	}
+	return v
+}
+
+// A LetReference evaluates a let expression in its original environment.
+//
+//	let X = x
+//
+type LetReference struct {
+	Src     *ast.Ident
+	UpCount int32
+	Label   Feature // for informative purposes
+	X       Expr    // the let expression's right-hand side
+}
+
+func (x *LetReference) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve wraps the let expression in a fresh anonymous Vertex so it can be
+// evaluated in the environment UpCount levels up.
+func (x *LetReference) resolve(c *OpContext, state VertexStatus) *Vertex {
+	e := c.Env(x.UpCount)
+	label := e.Vertex.Label
+	if x.X == nil {
+		panic("nil expression")
+	}
+	// Anonymous arc.
+	return &Vertex{Parent: nil, Label: label, Conjuncts: []Conjunct{{e, x.X, CloseInfo{}}}}
+}
+
+func (x *LetReference) evaluate(c *OpContext) Value {
+	e := c.Env(x.UpCount)
+
+	// Not caching let expressions may lead to exponential behavior.
+	return e.evalCached(c, x.X)
+}
+
+// A SelectorExpr looks up a fixed field in an expression.
+//
+//	X.Sel
+//
+type SelectorExpr struct {
+	Src *ast.SelectorExpr
+	X   Expr    // expression whose result is selected from
+	Sel Feature // the selected field's label
+}
+
+func (x *SelectorExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve evaluates X to a node and looks up Sel in it. Incomplete errors
+// below CycleError on a partially evaluated node are promoted to the node's
+// value so the caller sees them.
+func (x *SelectorExpr) resolve(c *OpContext, state VertexStatus) *Vertex {
+	// TODO: the node should really be evaluated as AllArcs, but the order
+	// of evaluation is slightly off, causing too much to be evaluated.
+	// This may especially result in incorrect results when using embedded
+	// scalars.
+	n := c.node(x, x.X, x.Sel.IsRegular(), Partial)
+	if n == emptyNode {
+		return n
+	}
+	if n.status == Partial {
+		if b := n.state.incompleteErrors(); b != nil && b.Code < CycleError {
+			n.BaseValue = b
+			return n
+		}
+	}
+	return c.lookup(n, x.Src.Sel.Pos(), x.Sel, state)
+}
+
+// IndexExpr is like a selector, but selects an index.
+//
+//	X[Index]
+//
+type IndexExpr struct {
+	Src   *ast.IndexExpr
+	X     Expr // expression being indexed
+	Index Expr // index expression; converted to a label before lookup
+}
+
+func (x *IndexExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// resolve evaluates X and Index, converts the index value to a label, and
+// looks it up in the resulting node. Mirrors SelectorExpr.resolve.
+func (x *IndexExpr) resolve(ctx *OpContext, state VertexStatus) *Vertex {
+	// TODO: support byte index.
+	// TODO: the node should really be evaluated as AllArcs, but the order
+	// of evaluation is slightly off, causing too much to be evaluated.
+	// This may especially result in incorrect results when using embedded
+	// scalars.
+	n := ctx.node(x, x.X, true, Partial)
+	i := ctx.value(x.Index)
+	if n == emptyNode {
+		return n
+	}
+	if n.status == Partial {
+		if b := n.state.incompleteErrors(); b != nil && b.Code < CycleError {
+			n.BaseValue = b
+			return n
+		}
+	}
+	f := ctx.Label(x.Index, i)
+	return ctx.lookup(n, x.Src.Index.Pos(), f, state)
+}
+
+// A SliceExpr represents a slice operation. (Not currently in spec.)
+//
+//	X[Lo:Hi:Stride]
+//
+type SliceExpr struct {
+	Src    Expr // NOTE(review): field types below follow the original.
+	X      Expr
+	Lo     Expr
+	Hi     Expr
+	Stride Expr
+}
+
+func (x *SliceExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate computes the slice of a list vertex (copying the selected arcs
+// into a fresh, relabeled list) or of a bytes value. Strides are not yet
+// supported. Out-of-range or inverted indices yield errors.
+func (x *SliceExpr) evaluate(c *OpContext) Value {
+	// TODO: strides
+
+	v := c.value(x.X)
+	const as = "slice index"
+
+	switch v := v.(type) {
+	case nil:
+		c.addErrf(IncompleteError, c.pos(), "non-concrete slice subject %s", x.X)
+		return nil
+	case *Vertex:
+		if !v.IsList() {
+			break
+		}
+
+		var (
+			lo = uint64(0)
+			hi = uint64(len(v.Arcs))
+		)
+		if x.Lo != nil {
+			lo = c.uint64(c.value(x.Lo), as)
+		}
+		if x.Hi != nil {
+			hi = c.uint64(c.value(x.Hi), as)
+			if hi > uint64(len(v.Arcs)) {
+				return c.NewErrf("index %d out of range", hi)
+			}
+		}
+		if lo > hi {
+			return c.NewErrf("invalid slice index: %d > %d", lo, hi)
+		}
+
+		// Build a new list whose arcs are shallow copies of the selected
+		// range, re-indexed from 0.
+		n := c.newList(c.src, v.Parent)
+		for i, a := range v.Arcs[lo:hi] {
+			label, err := MakeLabel(a.Source(), int64(i), IntLabel)
+			if err != nil {
+				c.AddBottom(&Bottom{Src: a.Source(), Err: err})
+				return nil
+			}
+			arc := *a
+			arc.Parent = n
+			arc.Label = label
+			n.Arcs = append(n.Arcs, &arc)
+		}
+		n.status = Finalized
+		return n
+
+	case *Bytes:
+		var (
+			lo = uint64(0)
+			hi = uint64(len(v.B))
+		)
+		if x.Lo != nil {
+			lo = c.uint64(c.value(x.Lo), as)
+		}
+		if x.Hi != nil {
+			hi = c.uint64(c.value(x.Hi), as)
+			if hi > uint64(len(v.B)) {
+				return c.NewErrf("index %d out of range", hi)
+			}
+		}
+		if lo > hi {
+			return c.NewErrf("invalid slice index: %d > %d", lo, hi)
+		}
+		return c.newBytes(v.B[lo:hi])
+	}
+
+	if isError(v) {
+		return v
+	}
+	return c.NewErrf("cannot slice %v (type %s)", v, v.Kind())
+}
+
+// An Interpolation is a string interpolation.
+//
+//	"a \(b) c"
+//
+type Interpolation struct {
+	Src   *ast.Interpolation
+	K     Kind   // string or bytes
+	Parts []Expr // odd: strings, even sources
+}
+
+func (x *Interpolation) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate concatenates the evaluated parts into a String or Bytes value
+// according to K. Any evaluation error is wrapped as an invalid-interpolation
+// error and returned as the result.
+func (x *Interpolation) evaluate(c *OpContext) Value {
+	buf := bytes.Buffer{}
+	for _, e := range x.Parts {
+		v := c.value(e)
+		if x.K == BytesKind {
+			buf.Write(c.ToBytes(v))
+		} else {
+			buf.WriteString(c.ToString(v))
+		}
+	}
+	if err := c.Err(); err != nil {
+		err = &Bottom{
+			Code: err.Code,
+			Err:  errors.Wrapf(err.Err, pos(x), "invalid interpolation"),
+		}
+		// c.AddBottom(err)
+		// return nil
+		return err
+	}
+	if x.K == BytesKind {
+		return &Bytes{x.Src, buf.Bytes(), nil}
+	}
+	return &String{x.Src, buf.String(), nil}
+}
+
+// UnaryExpr is a unary expression.
+//
+//	Op X
+//	-X !X +X
+//
+type UnaryExpr struct {
+	Src *ast.UnaryExpr
+	Op  Op
+	X   Expr
+}
+
+func (x *UnaryExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate applies the unary operator to the evaluated operand: negation for
+// numbers, identity for unary plus, and logical not for booleans. Operands of
+// the right kind that are not yet concrete produce an incomplete error;
+// operands of the wrong kind produce an invalid-operation error.
+func (x *UnaryExpr) evaluate(c *OpContext) Value {
+	if !c.concreteIsPossible(x.Op, x.X) {
+		return nil
+	}
+	v := c.value(x.X)
+	if isError(v) {
+		return v
+	}
+
+	op := x.Op
+	k := kind(v)
+	expectedKind := k
+	switch op {
+	case SubtractOp:
+		if v, ok := v.(*Num); ok {
+			f := *v
+			f.X.Neg(&v.X)
+			f.Src = x.Src
+			return &f
+		}
+		expectedKind = NumKind
+
+	case AddOp:
+		if v, ok := v.(*Num); ok {
+			// TODO: wrap in thunk to save position of '+'?
+			return v
+		}
+		expectedKind = NumKind
+
+	case NotOp:
+		if v, ok := v.(*Bool); ok {
+			return &Bool{x.Src, !v.B}
+		}
+		expectedKind = BoolKind
+	}
+	// The operand's kind overlaps what the operator expects, so the operand
+	// is merely not concrete yet.
+	if k&expectedKind != BottomKind {
+		c.addErrf(IncompleteError, pos(x.X),
+			"operand %s of '%s' not concrete (was %s)", x.X, op, k)
+		return nil
+	}
+	return c.NewErrf("invalid operation %s (%s %s)", x, op, k)
+}
+
+// BinaryExpr is a binary expression.
+//
+//	X + Y
+//	X & Y
+//
+type BinaryExpr struct {
+	Src *ast.BinaryExpr
+	Op  Op
+	X   Expr
+	Y   Expr
+}
+
+func (x *BinaryExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate computes the binary expression. Unification (&) is delegated to
+// the full evaluator via an anonymous vertex; comparisons against a literal
+// bottom get special validation; all other operators require both operands
+// to be concrete and are dispatched through BinOp.
+func (x *BinaryExpr) evaluate(c *OpContext) Value {
+	env := c.Env(0)
+	if x.Op == AndOp {
+		// Anonymous Arc
+		v := &Vertex{Conjuncts: []Conjunct{{env, x, CloseInfo{}}}}
+		c.Unify(v, Finalized)
+		return v
+	}
+
+	if !c.concreteIsPossible(x.Op, x.X) || !c.concreteIsPossible(x.Op, x.Y) {
+		return nil
+	}
+
+	// TODO: allow comparing to a literal Bottom only. Find something more
+	// principled perhaps. One should especially take care that two values
+	// evaluating to Bottom don't evaluate to true. For now we check for
+	// Bottom here and require that one of the values be a Bottom literal.
+	if x.Op == EqualOp || x.Op == NotEqualOp {
+		if isLiteralBottom(x.X) {
+			return c.validate(env, x.Src, x.Y, x.Op)
+		}
+		if isLiteralBottom(x.Y) {
+			return c.validate(env, x.Src, x.X, x.Op)
+		}
+	}
+
+	left, _ := c.Concrete(env, x.X, x.Op)
+	right, _ := c.Concrete(env, x.Y, x.Op)
+
+	if err := CombineErrors(x.Src, left, right); err != nil {
+		return err
+	}
+
+	if err := c.Err(); err != nil {
+		return err
+	}
+
+	return BinOp(c, x.Op, left, right)
+}
+
+// validate implements comparison of an expression against a literal bottom
+// (x == _|_ or x != _|_), returning a Bool. An expression that evaluates to
+// bottom (other than a cycle error) or to a non-concrete value "is" bottom
+// for EqualOp purposes. The non-monotonic counters track that this result
+// depends on the current (possibly still growing) state of evaluation.
+func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op) (r Value) {
+	s := c.PushState(env, src)
+	if c.nonMonotonicLookupNest == 0 {
+		c.nonMonotonicGeneration++
+	}
+
+	var match bool
+	// NOTE: using Unwrap is maybe not entirely accurate, as it may discard
+	// a future error. However, if it does so, the error will at least be
+	// reported elsewhere.
+	switch b := c.value(x).(type) {
+	case nil:
+	case *Bottom:
+		if b.Code == CycleError {
+			c.PopState(s)
+			c.AddBottom(b)
+			return nil
+		}
+		match = op == EqualOp
+		// We have a nonmonotonic use of a failure. Referenced fields should
+		// not be added anymore.
+		c.nonMonotonicRejectNest++
+		c.evalState(x, Partial)
+		c.nonMonotonicRejectNest--
+
+	default:
+		// TODO(cycle): if EqualOp:
+		// - ensure to pass special status to if clause or keep a track of "hot"
+		//   paths.
+		// - evaluate hypothetical struct
+		// - walk over all fields and verify that fields are not contradicting
+		//   previously marked fields.
+		//
+		switch {
+		case b.Concreteness() > Concrete:
+			// TODO: mimic comparison to bottom semantics. If it is a valid
+			// value, check for concreteness that this level only. This
+			// should ultimately be replaced with an exists and valid
+			// builtin.
+			match = op == EqualOp
+		default:
+			match = op != EqualOp
+		}
+		c.nonMonotonicLookupNest++
+		c.evalState(x, Partial)
+		c.nonMonotonicLookupNest--
+	}
+
+	c.PopState(s)
+	return &Bool{src, match}
+}
+
+// A CallExpr represents a call to a builtin.
+//
+//	len(x)
+//	strings.ToLower(x)
+//
+type CallExpr struct {
+	Src  *ast.CallExpr
+	Fun  Expr
+	Args []Expr
+}
+
+func (x *CallExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate resolves the callee, evaluates the arguments, and either invokes
+// the builtin or, when the argument count marks it as a validator, returns a
+// BuiltinValidator to be applied later.
+func (x *CallExpr) evaluate(c *OpContext) Value {
+	fun := c.value(x.Fun)
+	var b *Builtin
+	switch f := fun.(type) {
+	case *Builtin:
+		b = f
+
+	case *BuiltinValidator:
+		// We allow a validator that takes no arguments except the validated
+		// value to be called with zero arguments.
+		switch {
+		case f.Src != nil:
+			c.AddErrf("cannot call previously called validator %s", x.Fun)
+
+		case f.Builtin.IsValidator(len(x.Args)):
+			v := *f
+			v.Src = x
+			return &v
+
+		default:
+			b = f.Builtin
+		}
+
+	default:
+		c.AddErrf("cannot call non-function %s (type %s)", x.Fun, kind(fun))
+		return nil
+	}
+	args := []Value{}
+	for i, a := range x.Args {
+		expr := c.value(a)
+		switch v := expr.(type) {
+		case nil:
+			// There SHOULD be an error in the context. If not, we generate
+			// one.
+			c.Assertf(pos(x.Fun), c.HasErr(),
+				"argument %d to function %s is incomplete", i, x.Fun)
+
+		case *Bottom:
+			// TODO(errors): consider adding an argument index for these errors.
+			// On the other hand, this error is really not related to the
+			// argument itself, so maybe it is good as it is.
+			c.AddBottom(v)
+
+		default:
+			args = append(args, expr)
+		}
+	}
+	if c.HasErr() {
+		return nil
+	}
+	if b.IsValidator(len(args)) {
+		return &BuiltinValidator{x, b, args}
+	}
+	result := b.call(c, pos(x), args)
+	if result == nil {
+		return nil
+	}
+	return c.evalState(result, Partial)
+}
+
+// A Builtin is a value representing a native function call.
+type Builtin struct {
+	// TODO: make these values for better type checking.
+	Params []Param // declared parameters, in call order
+	Result Kind    // kind of the returned value
+	Func   func(c *OpContext, args []Value) Expr
+
+	Package Feature // package the builtin belongs to, for display
+	Name    string
+}
+
+// A Param describes a single builtin parameter.
+type Param struct {
+	Name  Feature // name of the argument; mostly for documentation
+	Value Value   // Could become Value later, using disjunctions for defaults.
+}
+
+// Kind returns the kind mask of this parameter.
+func (p Param) Kind() Kind {
+	v := p.Value
+	return v.Kind()
+}
+
+// Default reports the default value for this Param or nil if there is none.
+// A default exists only when the parameter value is a disjunction with
+// exactly one marked default.
+func (p Param) Default() Value {
+	d, ok := p.Value.(*Disjunction)
+	if ok && d.NumDefaults == 1 {
+		return d.Values[0]
+	}
+	return nil
+}
+
+// WriteName writes the fully qualified "package.name" of this builtin to w.
+func (x *Builtin) WriteName(w io.Writer, c *OpContext) {
+	pkg := x.Package.StringValue(c)
+	_, _ = fmt.Fprintf(w, "%s.%s", pkg, x.Name)
+}
+
+// Kind here represents the case where Builtin is used as a Validator.
+func (x *Builtin) Kind() Kind {
+	return FuncKind
+}
+
+// BareValidator returns this builtin wrapped as a validator if it has
+// exactly one parameter and a bool (or bottom) result; otherwise nil.
+func (x *Builtin) BareValidator() *BuiltinValidator {
+	if len(x.Params) != 1 ||
+		(x.Result != BoolKind && x.Result != BottomKind) {
+		return nil
+	}
+	return &BuiltinValidator{Builtin: x}
+}
+
+// IsValidator reports whether b should be interpreted as a Validator for the
+// given number of arguments: exactly one argument (the validated value) is
+// missing, the result is boolean, and the missing parameter has no default.
+func (b *Builtin) IsValidator(numArgs int) bool {
+	return numArgs == len(b.Params)-1 &&
+		b.Result&^BoolKind == 0 &&
+		b.Params[numArgs].Default() == nil
+}
+
+// bottom unwraps v (descending into a Vertex's value if needed) and returns
+// it as a *Bottom, or nil when v is not an error.
+func bottom(v Value) *Bottom {
+	if vtx, ok := v.(*Vertex); ok {
+		v = vtx.Value()
+	}
+	if b, ok := v.(*Bottom); ok {
+		return b
+	}
+	return nil
+}
+
+// call invokes the builtin with args after checking arity (filling in
+// declared defaults for trailing parameters), propagating argument errors,
+// checking argument kinds, and unifying arguments against non-BasicType
+// parameter constraints. It returns nil when an error was recorded in c.
+func (x *Builtin) call(c *OpContext, p token.Pos, args []Value) Expr {
+	fun := x // right now always x.
+	if len(args) > len(x.Params) {
+		c.addErrf(0, p,
+			"too many arguments in call to %s (have %d, want %d)",
+			fun, len(args), len(x.Params))
+		return nil
+	}
+	// Fill missing trailing arguments from parameter defaults.
+	for i := len(args); i < len(x.Params); i++ {
+		v := x.Params[i].Default()
+		if v == nil {
+			c.addErrf(0, p,
+				"not enough arguments in call to %s (have %d, want %d)",
+				fun, len(args), len(x.Params))
+			return nil
+		}
+		args = append(args, v)
+	}
+	for i, a := range args {
+		if x.Params[i].Kind() == BottomKind {
+			continue // parameter accepts anything, including errors
+		}
+		if b := bottom(a); b != nil {
+			return b
+		}
+		if k := kind(a); x.Params[i].Kind()&k == BottomKind {
+			code := EvalError
+			b, _ := args[i].(*Bottom)
+			if b != nil {
+				code = b.Code
+			}
+			c.addErrf(code, pos(a),
+				"cannot use %s (type %s) as %s in argument %d to %s",
+				a, k, x.Params[i].Kind(), i+1, fun)
+			return nil
+		}
+		// For constraint-valued parameters, unify the argument with the
+		// constraint and substitute the unified result.
+		v := x.Params[i].Value
+		if _, ok := v.(*BasicType); !ok {
+			env := c.Env(0)
+			x := &BinaryExpr{Op: AndOp, X: v, Y: a}
+			n := &Vertex{Conjuncts: []Conjunct{{env, x, CloseInfo{}}}}
+			c.Unify(n, Finalized)
+			if _, ok := n.BaseValue.(*Bottom); ok {
+				c.addErrf(0, pos(a),
+					"cannot use %s as %s in argument %d to %s",
+					a, v, i+1, fun)
+				return nil
+			}
+			args[i] = n
+		}
+	}
+	return x.Func(c, args)
+}
+
+// Source returns nil: a builtin has no originating syntax node.
+func (x *Builtin) Source() ast.Node {
+	return nil
+}
+
+// A BuiltinValidator is a Value that results from evaluation a partial call
+// to a builtin (using CallExpr).
+//
+//	strings.MinRunes(4)
+//
+type BuiltinValidator struct {
+	Src     *CallExpr
+	Builtin *Builtin
+	Args    []Value // any but the first value
+}
+
+func (x *BuiltinValidator) Source() ast.Node {
+	// Fall back to the builtin's source when no call expression is recorded.
+	if x.Src == nil {
+		return x.Builtin.Source()
+	}
+	return x.Src.Source()
+}
+
+// Pos returns the position of the originating syntax node, or token.NoPos
+// when there is none.
+func (x *BuiltinValidator) Pos() token.Pos {
+	if src := x.Source(); src != nil {
+		return src.Pos()
+	}
+	return token.NoPos
+}
+
+// Kind is the kind of the first (validated) parameter.
+func (x *BuiltinValidator) Kind() Kind {
+	return x.Builtin.Params[0].Kind()
+}
+
+// validate applies the validator to v by calling the underlying builtin with
+// v prepended to the stored arguments.
+func (x *BuiltinValidator) validate(c *OpContext, v Value) *Bottom {
+	args := make([]Value, len(x.Args)+1)
+	args[0] = v
+	copy(args[1:], x.Args)
+
+	return validateWithBuiltin(c, x.Pos(), x.Builtin, args)
+}
+
+// validateWithBuiltin calls b with args (args[0] being the validated value)
+// and converts a false/bottom result into a descriptive "does not satisfy"
+// Bottom. A nil or true result means the value is valid.
+func validateWithBuiltin(c *OpContext, src token.Pos, b *Builtin, args []Value) *Bottom {
+	var severeness ErrorCode
+	var err errors.Error
+
+	res := b.call(c, src, args)
+	switch v := res.(type) {
+	case nil:
+		return nil
+
+	case *Bottom:
+		if v == nil {
+			return nil // caught elsewhere, but be defensive.
+		}
+		severeness = v.Code
+		err = v.Err
+
+	case *Bool:
+		if v.B {
+			return nil
+		}
+
+	default:
+		return c.NewErrf("invalid validator %s.%s", b.Package.StringValue(c), b.Name)
+	}
+
+	// failed: format the validator call, e.g. strings.MinRunes(4), for the
+	// error message. args[0] is the validated value and is not printed as
+	// an argument.
+	var buf bytes.Buffer
+	b.WriteName(&buf, c)
+	if len(args) > 1 {
+		buf.WriteString("(")
+		for i, a := range args[1:] {
+			if i > 0 {
+				_, _ = buf.WriteString(", ")
+			}
+			buf.WriteString(c.Str(a))
+		}
+		buf.WriteString(")")
+	}
+
+	vErr := c.NewPosf(src, "invalid value %s (does not satisfy %s)", args[0], buf.String())
+
+	for _, v := range args {
+		vErr.AddPosition(v)
+	}
+
+	return &Bottom{Code: severeness, Err: errors.Wrap(vErr, err)}
+}
+
+// A DisjunctionExpr represents a disjunction, where each disjunct may or may
+// not be marked as a default.
+type DisjunctionExpr struct {
+	Src    *ast.BinaryExpr
+	Values []Disjunct
+
+	HasDefaults bool // true if any disjunct is marked as a default
+}
+
+// A Disjunct is used in Disjunction.
+type Disjunct struct {
+	Val     Expr
+	Default bool // true if this disjunct is marked with *
+}
+
+func (x *DisjunctionExpr) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// evaluate delegates disjunction expansion to the full evaluator by wrapping
+// the expression in an anonymous vertex and unifying it.
+func (x *DisjunctionExpr) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{Conjuncts: []Conjunct{{e, x, CloseInfo{}}}}
+	c.Unify(v, Finalized) // TODO: also partial okay?
+	// TODO: if the disjunction result originated from a literal value, we may
+	// consider the result closed to create more permanent errors.
+	return v
+}
+
+// A Conjunction is a conjunction of values that cannot be represented as a
+// single value. It is the result of unification.
+type Conjunction struct {
+	Src    ast.Expr
+	Values []Value
+}
+
+func (x *Conjunction) Source() ast.Node { return x.Src }
+
+// Kind is the intersection of the kinds of all conjuncts.
+func (x *Conjunction) Kind() Kind {
+	k := TopKind
+	for _, v := range x.Values {
+		k &= v.Kind()
+	}
+	return k
+}
+
+// A Disjunction is a disjunction of values. It is the result of expanding
+// a DisjunctionExpr if the expression cannot be represented as a single value.
+type Disjunction struct {
+	Src ast.Expr
+
+	// Values are the non-error disjuncts of this expression. The first
+	// NumDefault values are default values.
+	Values []*Vertex
+
+	Errors *Bottom // []bottom
+
+	// NumDefaults indicates the number of default values.
+	NumDefaults int
+	HasDefaults bool
+}
+
+func (x *Disjunction) Source() ast.Node { return x.Src }
+
+// Kind is the union of the kinds of all disjuncts.
+func (x *Disjunction) Kind() Kind {
+	k := BottomKind
+	for _, v := range x.Values {
+		k |= v.Kind()
+	}
+	return k
+}
+
+// A Comprehension pairs a chain of clauses (for/if/let) with the value they
+// yield.
+type Comprehension struct {
+	Clauses Yielder // the for/if/let clause chain
+	Value   Expr    // the value produced per yielded environment
+}
+
+func (x *Comprehension) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Clauses == nil {
+		return nil
+	}
+	return x.Clauses.Source()
+}
+
+// A ForClause represents a for clause of a comprehension. It can be used
+// as a struct or list element.
+//
+//	for k, v in src {}
+//
+type ForClause struct {
+	Syntax *ast.ForClause
+	Key    Feature // label bound to the key, or InvalidLabel if absent
+	Value  Feature // label bound to the value, or InvalidLabel if absent
+	Src    Expr    // expression iterated over
+	Dst    Yielder // next clause or value in the chain
+}
+
+func (x *ForClause) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Syntax == nil {
+		return nil
+	}
+	return x.Syntax
+}
+
+// yield iterates over the regular arcs of the evaluated source, binding the
+// key and value labels in a fresh scope per element and delegating to Dst.
+// Iteration stops on the first error.
+func (x *ForClause) yield(c *OpContext, f YieldFunc) {
+	n := c.node(x, x.Src, true, Finalized)
+	for _, a := range n.Arcs {
+		if !a.Label.IsRegular() {
+			continue
+		}
+
+		c.Unify(a, Partial)
+
+		// Scope vertex holding the per-iteration key/value bindings.
+		n := &Vertex{status: Finalized}
+
+		if x.Value != InvalidLabel {
+			b := &Vertex{
+				Label:     x.Value,
+				BaseValue: a,
+			}
+			n.Arcs = append(n.Arcs, b)
+		}
+
+		if x.Key != InvalidLabel {
+			v := &Vertex{Label: x.Key}
+			key := a.Label.ToValue(c)
+			v.AddConjunct(MakeRootConjunct(c.Env(0), key))
+			v.SetValue(c, Finalized, key)
+			n.Arcs = append(n.Arcs, v)
+		}
+
+		sub := c.spawn(n)
+		saved := c.PushState(sub, x.Dst.Source())
+		x.Dst.yield(c, f)
+		if b := c.PopState(saved); b != nil {
+			c.AddBottom(b)
+			break
+		}
+		if c.HasErr() {
+			break
+		}
+	}
+}
+
+// An IfClause represents an if clause of a comprehension. It can be used
+// as a struct or list element.
+//
+//	if cond {}
+//
+type IfClause struct {
+	Src       *ast.IfClause
+	Condition Expr
+	Dst       Yielder // next clause or value in the chain
+}
+
+func (x *IfClause) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// yield continues down the clause chain only when the condition evaluates
+// to true.
+func (x *IfClause) yield(ctx *OpContext, f YieldFunc) {
+	if ctx.BoolValue(ctx.value(x.Condition)) {
+		x.Dst.yield(ctx, f)
+	}
+}
+
+// A LetClause represents a let clause in a comprehension.
+//
+//	let x = y
+//
+type LetClause struct {
+	Src   *ast.LetClause
+	Label Feature // label the expression is bound to
+	Expr  Expr    // the bound expression
+	Dst   Yielder // next clause or value in the chain
+}
+
+func (x *LetClause) Source() ast.Node {
+	// Avoid returning a typed nil interface.
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// yield introduces a scope binding Label to Expr and delegates to Dst.
+func (x *LetClause) yield(c *OpContext, f YieldFunc) {
+	n := &Vertex{Arcs: []*Vertex{
+		{Label: x.Label, Conjuncts: []Conjunct{{c.Env(0), x.Expr, CloseInfo{}}}},
+	}}
+
+	sub := c.spawn(n)
+	saved := c.PushState(sub, x.Dst.Source())
+	x.Dst.yield(c, f)
+	if b := c.PopState(saved); b != nil {
+		c.AddBottom(b)
+	}
+}
+
+// A ValueClause represents the value part of a comprehension: the terminal
+// Yielder that finally invokes the yield function.
+type ValueClause struct {
+	*StructLit
+}
+
+func (x *ValueClause) Source() ast.Node {
+	// Guard both levels against typed-nil interface returns.
+	if x.StructLit == nil {
+		return nil
+	}
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// yield invokes f with the current environment; the clause chain ends here.
+func (x *ValueClause) yield(op *OpContext, f YieldFunc) {
+	f(op.Env(0))
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/feature.go b/vendor/cuelang.org/go/internal/core/adt/feature.go
new file mode 100644
index 0000000000..26d6c9301f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/feature.go
@@ -0,0 +1,324 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+)
+
+// A Feature is an encoded form of a label which comprises a compact
+// representation of an integer or string label as well as a label type.
+type Feature uint32
+
+// TODO: create labels such that list are sorted first (or last with index.)
+
+// InvalidLabel is an encoding of an erroneous label.
+const (
+ InvalidLabel Feature = 0
+
+ // MaxIndex indicates the maximum number of unique strings that are used for
+ // labeles within this CUE implementation.
+ MaxIndex = 1<<(32-indexShift) - 1
+)
+
+// These labels can be used for wildcard queries.
+var (
+ AnyDefinition Feature = makeLabel(MaxIndex, DefinitionLabel)
+ AnyHidden Feature = makeLabel(MaxIndex, HiddenLabel)
+ AnyString Feature = makeLabel(MaxIndex, StringLabel)
+ AnyIndex Feature = makeLabel(MaxIndex, IntLabel)
+)
+
+// A StringIndexer coverts strings to and from an index that is unique for a
+// given string.
+type StringIndexer interface {
+ // ToIndex returns a unique positive index for s (0 < index < 2^28-1).
+ //
+ // For each pair of strings s and t it must return the same index if and
+ // only if s == t.
+ StringToIndex(s string) (index int64)
+
+ // ToString returns a string s for index such that ToIndex(s) == index.
+ IndexToString(index int64) string
+}
+
+// SelectorString reports the shortest string representation of f when used as a
+// selector.
+func (f Feature) SelectorString(index StringIndexer) string {
+	x := f.safeIndex()
+	switch f.Typ() {
+	case IntLabel:
+		return strconv.Itoa(int(x))
+	case StringLabel:
+		s := index.IndexToString(x)
+		// Quote the string unless it is a plain identifier that could not
+		// be mistaken for a definition or hidden field.
+		if ast.IsValidIdent(s) && !internal.IsDefOrHidden(s) {
+			return s
+		}
+		return literal.String.Quote(s)
+	default:
+		// Definitions and hidden fields print as identifiers.
+		return f.IdentString(index)
+	}
+}
+
+// IdentString reports the identifier of f. The result is undefined if f
+// is not an identifier label.
+func (f Feature) IdentString(index StringIndexer) string {
+	s := index.IndexToString(f.safeIndex())
+	if f.IsHidden() {
+		// Hidden labels are stored as "name\x00pkgID" (see MakeIdentLabel);
+		// strip the package qualifier for display.
+		if p := strings.IndexByte(s, '\x00'); p >= 0 {
+			s = s[:p]
+		}
+	}
+	return s
+}
+
+// PkgID returns the package identifier, composed of the module and package
+// name, associated with this identifier. It will return "" if this is not
+// a hidden label.
+func (f Feature) PkgID(index StringIndexer) string {
+	if !f.IsHidden() {
+		return ""
+	}
+	s := index.IndexToString(f.safeIndex())
+	// Hidden labels are stored as "name\x00pkgID" (see MakeIdentLabel);
+	// keep only the part after the separator.
+	if p := strings.IndexByte(s, '\x00'); p >= 0 {
+		s = s[p+1:]
+	}
+	return s
+}
+
+// StringValue reports the string value of f, which must be a string label.
+// It panics otherwise.
+func (f Feature) StringValue(index StringIndexer) string {
+	if !f.IsString() {
+		panic("not a string label")
+	}
+	x := f.safeIndex()
+	return index.IndexToString(x)
+}
+
+// ToValue converts a label to a value, which will be a Num for integer labels
+// and a String for string labels. It panics when f is not a regular label.
+func (f Feature) ToValue(ctx *OpContext) Value {
+	if !f.IsRegular() {
+		panic("not a regular label")
+	}
+	// TODO: Handle special regular values: invalid and AnyRegular.
+	if f.IsInt() {
+		return ctx.NewInt64(int64(f.Index()))
+	}
+	// String label: resolve the interned index back to its string.
+	x := f.safeIndex()
+	str := ctx.IndexToString(x)
+	return ctx.NewString(str)
+}
+
+// StringLabel converts s to a string label. The string is interned via the
+// context's string indexer inside labelFromValue.
+func (c *OpContext) StringLabel(s string) Feature {
+	return labelFromValue(c, nil, &String{Str: s})
+}
+
+// MakeStringLabel creates a label for the given string.
+func MakeStringLabel(r StringIndexer, s string) Feature {
+	i := r.StringToIndex(s)
+
+	// TODO: set position if it exists.
+	f, err := MakeLabel(nil, i, StringLabel)
+	if err != nil {
+		// MakeLabel only fails when the index exceeds MaxIndex, i.e. the
+		// indexer has run out of unique string slots.
+		panic("out of free string slots")
+	}
+	return f
+}
+
+// MakeIdentLabel creates a label for the given identifier. Hidden
+// identifiers (prefixed "_" or "_#") are qualified with pkgpath using a
+// "name\x00pkgpath" encoding so hidden fields of different packages do not
+// collide; IdentString and PkgID decode this encoding.
+func MakeIdentLabel(r StringIndexer, s, pkgpath string) Feature {
+	t := StringLabel
+	switch {
+	case strings.HasPrefix(s, "_#"):
+		t = HiddenDefinitionLabel
+		s = fmt.Sprintf("%s\x00%s", s, pkgpath)
+	case strings.HasPrefix(s, "#"):
+		t = DefinitionLabel
+	case strings.HasPrefix(s, "_"):
+		s = fmt.Sprintf("%s\x00%s", s, pkgpath)
+		t = HiddenLabel
+	}
+	i := r.StringToIndex(s)
+	f, err := MakeLabel(nil, i, t)
+	if err != nil {
+		panic("out of free string slots")
+	}
+	return f
+}
+
+const msgGround = "invalid non-ground value %s (must be concrete %s)"
+
+// labelFromValue converts a concrete value into a label Feature: integers
+// become int labels and strings become string labels. Any other kind, a
+// non-concrete value, or a negative/out-of-range index yields InvalidLabel
+// and records an error on c. src, when non-nil, is used for error messages.
+func labelFromValue(c *OpContext, src Expr, v Value) Feature {
+	var i int64
+	var t FeatureType
+	if isError(v) {
+		return InvalidLabel
+	}
+	switch v.Kind() {
+	case IntKind, NumKind:
+		x, _ := Unwrap(v).(*Num)
+		if x == nil {
+			c.addErrf(IncompleteError, pos(v), msgGround, v, "int")
+			return InvalidLabel
+		}
+		t = IntLabel
+		var err error
+		i, err = x.X.Int64()
+		if err != nil || x.K != IntKind {
+			if src == nil {
+				src = v
+			}
+			c.AddErrf("invalid index %v: %v", src, err)
+			return InvalidLabel
+		}
+		if i < 0 {
+			switch src.(type) {
+			case nil, *Num, *UnaryExpr:
+				// If the value is a constant, we know it is always an error.
+				// UnaryExpr is an approximation for a constant value here.
+				c.AddErrf("invalid index %s (index must be non-negative)", x)
+			default:
+				// Use a different message if it is the result of evaluation.
+				c.AddErrf("index %s out of range [%s]", src, x)
+			}
+			return InvalidLabel
+		}
+
+	case StringKind:
+		x, _ := Unwrap(v).(*String)
+		if x == nil {
+			c.addErrf(IncompleteError, pos(v), msgGround, v, "string")
+			return InvalidLabel
+		}
+		t = StringLabel
+		i = c.StringToIndex(x.Str)
+
+	default:
+		if src != nil {
+			c.AddErrf("invalid index %s (invalid type %v)", src, v.Kind())
+		} else {
+			c.AddErrf("invalid index type %v", v.Kind())
+		}
+		return InvalidLabel
+	}
+
+	// TODO: set position if it exists.
+	f, err := MakeLabel(nil, i, t)
+	if err != nil {
+		c.AddErr(err)
+	}
+	return f
+}
+
+// MakeLabel creates a label. It reports an error if the index is out of range.
+func MakeLabel(src ast.Node, index int64, f FeatureType) (Feature, errors.Error) {
+	if 0 > index || index > MaxIndex-1 {
+		p := token.NoPos
+		if src != nil {
+			p = src.Pos()
+		}
+		return InvalidLabel,
+			errors.Newf(p, "int label out of range (%d not >=0 and <= %d)",
+				index, MaxIndex-1)
+	}
+	// Pack the index into the high bits and the label type into the low bits.
+	return Feature(index)<<indexShift | Feature(f), nil
+}
+
+// Index reports the abstract index associated with f.
+func (f Feature) Index() int {
+	return int(f >> indexShift)
+}
+
+// safeIndex reports the abstract index associated with f, mapping MaxIndex
+// (the "any" wildcard, see AnyString et al.) to 0.
+func (f Feature) safeIndex() int64 {
+	x := int(f >> indexShift)
+	if x == MaxIndex {
+		x = 0 // Safety, MaxIndex means any
+	}
+	return int64(x)
+}
+
+// TODO: should let declarations be implemented as fields?
+// func (f Feature) isLet() bool { return f.typ() == letLabel }
diff --git a/vendor/cuelang.org/go/internal/core/adt/kind.go b/vendor/cuelang.org/go/internal/core/adt/kind.go
new file mode 100644
index 0000000000..1c3bd7e752
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/kind.go
@@ -0,0 +1,184 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Concreteness is a measure of the level of concreteness of a value, where
+// lower values mean more concrete.
+type Concreteness int
+
+const (
+ BottomLevel Concreteness = iota
+
+ // Concrete indicates a concrete scalar value, list or struct.
+ Concrete
+
+ // Constraint indicates a non-concrete scalar value that is more specific,
+ // than a top-level type.
+ Constraint
+
+ // PrimitiveType indicates a top-level specific type, for instance, string,
+ // bytes, number, or bool.
+ Type
+
+ // Any indicates any value, or top.
+ Any
+)
+
+// IsConcrete returns whether a value is concrete.
+func IsConcrete(v Value) bool {
+	// Vertex has its own notion of concreteness.
+	if x, ok := v.(*Vertex); ok {
+		return x.IsConcrete()
+	}
+	if v == nil {
+		return false
+	}
+	return v.Concreteness() <= Concrete
+}
+
+// Kind reports the Value kind.
+type Kind uint16
+
+const (
+ NullKind Kind = (1 << iota)
+ BoolKind
+ IntKind
+ FloatKind
+ StringKind
+ BytesKind
+ FuncKind
+ ListKind
+ StructKind
+
+ allKinds
+
+ _numberKind
+
+ NumberKind = IntKind | FloatKind
+
+ BottomKind Kind = 0
+
+ NumKind = IntKind | FloatKind
+ TopKind Kind = (allKinds - 1) // all kinds, but not references
+ ScalarKinds = NullKind | BoolKind |
+ IntKind | FloatKind | StringKind | BytesKind
+)
+
+// kind returns v's Kind, or BottomKind for a nil value.
+func kind(v Value) Kind {
+	if v == nil {
+		return BottomKind
+	}
+	return v.Kind()
+}
+
+// IsAnyOf reports whether k is any of the given kinds.
+//
+// For instance, k.IsAnyOf(String|Bytes) reports whether k overlaps with
+// the String or Bytes kind.
+func (k Kind) IsAnyOf(of Kind) bool {
+	return k&of != BottomKind
+}
+
+// CanString reports whether the given type can convert to a string.
+//
+// NOTE(review): by Go operator precedence this parses as
+// ((k & StringKind) | ScalarKinds) != BottomKind, which is true for every k
+// because ScalarKinds is a non-zero constant. The intent was presumably
+// k&ScalarKinds != BottomKind — confirm against upstream before changing,
+// since this is vendored code.
+func (k Kind) CanString() bool {
+	return k&StringKind|ScalarKinds != BottomKind
+}
+
+// String returns the representation of the Kind as
+// a CUE expression. For example:
+//
+// (IntKind|ListKind).String()
+//
+// will return:
+//
+// (int|[...])
+func (k Kind) String() string {
+ return toString(k, kindStrs)
+}
+
+// TypeString is like String, but returns a string representation of a valid
+// CUE type.
+func (k Kind) TypeString() string {
+ return toString(k, typeStrs)
+}
+
+func toString(k Kind, m map[Kind]string) string {
+ if k == BottomKind {
+ return "_|_"
+ }
+ if k == TopKind {
+ return "_"
+ }
+ if (k & NumberKind) == NumberKind {
+ k = (k &^ NumberKind) | _numberKind
+ }
+ var buf strings.Builder
+ multiple := bits.OnesCount(uint(k)) > 1
+ if multiple {
+ buf.WriteByte('(')
+ }
+ for count := 0; ; count++ {
+ n := bits.TrailingZeros(uint(k))
+ if n == bits.UintSize {
+ break
+ }
+ bit := Kind(1 << uint(n))
+ k &^= bit
+ s, ok := m[bit]
+ if !ok {
+ s = fmt.Sprintf("bad(%d)", n)
+ }
+ if count > 0 {
+ buf.WriteByte('|')
+ }
+ buf.WriteString(s)
+ }
+ if multiple {
+ buf.WriteByte(')')
+ }
+ return buf.String()
+}
+
+var kindStrs = map[Kind]string{
+ NullKind: "null",
+ BoolKind: "bool",
+ IntKind: "int",
+ FloatKind: "float",
+ StringKind: "string",
+ BytesKind: "bytes",
+ FuncKind: "func",
+ StructKind: "struct",
+ ListKind: "list",
+ _numberKind: "number",
+}
+
+// used to generate a parseable CUE type.
+var typeStrs = map[Kind]string{
+ NullKind: "null",
+ BoolKind: "bool",
+ IntKind: "int",
+ FloatKind: "float",
+ StringKind: "string",
+ BytesKind: "bytes",
+ FuncKind: "_",
+ StructKind: "{...}",
+ ListKind: "[...]",
+ _numberKind: "number",
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/op.go b/vendor/cuelang.org/go/internal/core/adt/op.go
new file mode 100644
index 0000000000..6383290ab6
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/op.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import "cuelang.org/go/cue/token"
+
+// Op indicates the operation at the top of an expression tree of the expression
+// use to evaluate a value.
+type Op int
+
+func (o Op) String() string {
+ return opToString[o]
+}
+
+// Values of Op.
+const (
+ NoOp Op = iota
+
+ AndOp
+ OrOp
+
+ SelectorOp
+ IndexOp
+ SliceOp
+ CallOp
+
+ BoolAndOp
+ BoolOrOp
+
+ EqualOp
+ NotOp
+ NotEqualOp
+ LessThanOp
+ LessEqualOp
+ GreaterThanOp
+ GreaterEqualOp
+
+ MatchOp
+ NotMatchOp
+
+ AddOp
+ SubtractOp
+ MultiplyOp
+ FloatQuotientOp
+ IntQuotientOp
+ IntRemainderOp
+ IntDivideOp
+ IntModuloOp
+
+ InterpolationOp
+)
+
+var opToString = map[Op]string{
+ AndOp: "&",
+ OrOp: "|",
+ BoolAndOp: "&&",
+ BoolOrOp: "||",
+ EqualOp: "==",
+ NotOp: "!",
+ NotEqualOp: "!=",
+ LessThanOp: "<",
+ LessEqualOp: "<=",
+ GreaterThanOp: ">",
+ GreaterEqualOp: ">=",
+ MatchOp: "=~",
+ NotMatchOp: "!~",
+ AddOp: "+",
+ SubtractOp: "-",
+ MultiplyOp: "*",
+ FloatQuotientOp: "/",
+ IntQuotientOp: "quo",
+ IntRemainderOp: "rem",
+ IntDivideOp: "div",
+ IntModuloOp: "mod",
+
+ SelectorOp: ".",
+ IndexOp: "[]",
+ SliceOp: "[:]",
+ CallOp: "()",
+
+ InterpolationOp: `\()`,
+}
+
+// OpFromToken converts a token.Token to an Op. Tokens without a mapping
+// yield NoOp (the zero value).
+func OpFromToken(t token.Token) Op {
+	return tokenMap[t]
+}
+
+// Token returns the token.Token corresponding to the Op. The mapping is the
+// inverse of tokenMap, built once in init.
+func (op Op) Token() token.Token {
+	return opMap[op]
+}
+
+var tokenMap = map[token.Token]Op{
+ token.OR: OrOp, // |
+ token.AND: AndOp, // &
+
+ token.ADD: AddOp, // +
+ token.SUB: SubtractOp, // -
+ token.MUL: MultiplyOp, // *
+ token.QUO: FloatQuotientOp, // /
+
+ token.IDIV: IntDivideOp, // div
+ token.IMOD: IntModuloOp, // mod
+ token.IQUO: IntQuotientOp, // quo
+ token.IREM: IntRemainderOp, // rem
+
+ token.LAND: BoolAndOp, // &&
+ token.LOR: BoolOrOp, // ||
+
+ token.EQL: EqualOp, // ==
+ token.LSS: LessThanOp, // <
+ token.GTR: GreaterThanOp, // >
+ token.NOT: NotOp, // !
+
+ token.NEQ: NotEqualOp, // !=
+ token.LEQ: LessEqualOp, // <=
+ token.GEQ: GreaterEqualOp, // >=
+ token.MAT: MatchOp, // =~
+ token.NMAT: NotMatchOp, // !~
+}
+
+var opMap = map[Op]token.Token{}
+
+// init builds the inverse of tokenMap so that Op.Token is a plain lookup.
+func init() {
+	for t, o := range tokenMap {
+		opMap[o] = t
+	}
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/optional.go b/vendor/cuelang.org/go/internal/core/adt/optional.go
new file mode 100644
index 0000000000..d4eed63112
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/optional.go
@@ -0,0 +1,137 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// MatchAndInsert finds matching optional parts for a given Arc and adds its
+// conjuncts. Bulk fields are only applied if no fields match, and additional
+// constraints are only added if neither regular nor bulk fields match.
+func (o *StructInfo) MatchAndInsert(c *OpContext, arc *Vertex) {
+ env := o.Env
+
+ closeInfo := o.CloseInfo
+ closeInfo.IsClosed = false
+
+ // Match normal fields
+ matched := false
+outer:
+ for _, f := range o.Fields {
+ if f.Label == arc.Label {
+ for _, e := range f.Optional {
+ arc.AddConjunct(MakeConjunct(env, e, closeInfo))
+ }
+ matched = true
+ break outer
+ }
+ }
+
+ f := arc.Label
+ if !f.IsRegular() {
+ return
+ }
+ var label Value
+
+ if int64(f.Index()) == MaxIndex {
+ f = 0
+ } else if o.types&HasComplexPattern != 0 && f.IsString() {
+ label = f.ToValue(c)
+ }
+
+ if len(o.Bulk) > 0 {
+ bulkEnv := *env
+ bulkEnv.DynamicLabel = f
+ bulkEnv.Deref = nil
+ bulkEnv.Cycles = nil
+
+ // match bulk optional fields / pattern properties
+ for _, b := range o.Bulk {
+ // if matched && f.additional {
+ // continue
+ // }
+ if matchBulk(c, env, b, f, label) {
+ matched = true
+ info := closeInfo.SpawnSpan(b.Value, ConstraintSpan)
+ arc.AddConjunct(MakeConjunct(&bulkEnv, b, info))
+ }
+ }
+ }
+
+ if matched || len(o.Additional) == 0 {
+ return
+ }
+
+ addEnv := *env
+ addEnv.Deref = nil
+ addEnv.Cycles = nil
+
+ // match others
+ for _, x := range o.Additional {
+ info := closeInfo
+ if _, ok := x.expr().(*Top); !ok {
+ info = info.SpawnSpan(x, ConstraintSpan)
+ }
+ // TODO: consider moving in above block (2 lines up).
+ arc.AddConjunct(MakeConjunct(&addEnv, x, info))
+ }
+}
+
+// matchBulk reports whether feature f matches the filter of x. If evaluation
+// of the filter is erroneous, it returns false and the error will be set in c.
+func matchBulk(c *OpContext, env *Environment, x *BulkOptionalField, f Feature, label Value) bool {
+	v := env.evalCached(c, x.Filter)
+	v = Unwrap(v)
+
+	// Fast-track certain cases.
+	switch x := v.(type) {
+	case *Bottom:
+		// Record the error only once.
+		if c.errs == nil {
+			c.AddBottom(x)
+		}
+		return false
+	case *Top:
+		return true
+
+	case *BasicType:
+		return x.K&StringKind != 0
+
+	case *BoundValue:
+		switch x.Kind() {
+		case StringKind:
+			if label == nil {
+				return false
+			}
+			str := label.(*String).Str
+			return x.validateStr(c, str)
+
+		case IntKind:
+			return x.validateInt(c, int64(f.Index()))
+		}
+	}
+
+	if label == nil {
+		// Without a label value the general unification below cannot apply.
+		return false
+	}
+
+	// General case: unify the label with the filter and check that the
+	// result is not an error.
+	n := Vertex{}
+	m := MakeRootConjunct(env, v)
+	n.AddConjunct(m)
+	n.AddConjunct(MakeRootConjunct(m.Env, label))
+
+	c.inConstraint++
+	n.Finalize(c)
+	c.inConstraint--
+
+	b, _ := n.BaseValue.(*Bottom)
+	return b == nil
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/simplify.go b/vendor/cuelang.org/go/internal/core/adt/simplify.go
new file mode 100644
index 0000000000..571800dba0
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/simplify.go
@@ -0,0 +1,223 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "github.com/cockroachdb/apd/v2"
+)
+
+// SimplifyBounds collapses bounds if possible. The bound values must be
+// concrete. It returns nil if the bound values cannot be collapsed.
+//
+// k represents additional type constraints, such as `int`.
+func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value {
+ xv := x.Value
+ yv := y.Value
+
+ cmp, xCat := opInfo(x.Op)
+ _, yCat := opInfo(y.Op)
+
+ // k := x.Kind() & y.Kind()
+
+ switch {
+ case xCat == yCat:
+ switch x.Op {
+ // NOTE: EqualOp should not happen, but include it defensively.
+ // Maybe an API would use it, for instance.
+ case EqualOp, NotEqualOp, MatchOp, NotMatchOp:
+ if test(ctx, EqualOp, xv, yv) {
+ return x
+ }
+ return nil // keep both bounds
+ }
+
+ // xCat == yCat && x.Op != NotEqualOp
+ // > a & >= b
+ // > a if a >= b
+ // >= b if a < b
+ // > a & > b
+ // > a if a >= b
+ // > b if a < b
+ // >= a & > b
+ // >= a if a > b
+ // > b if a <= b
+ // >= a & >= b
+ // >= a if a > b
+ // >= b if a <= b
+ // inverse is true as well.
+
+ // Tighten bound.
+ if test(ctx, cmp, xv, yv) {
+ return x
+ }
+ return y
+
+ case xCat == -yCat:
+ if xCat == -1 {
+ x, y = y, x
+ }
+ a, aOK := xv.(*Num)
+ b, bOK := yv.(*Num)
+
+ if !aOK || !bOK {
+ break
+ }
+
+ var d, lo, hi apd.Decimal
+ lo.Set(&a.X)
+ hi.Set(&b.X)
+ if k&FloatKind == 0 {
+ // Readjust bounds for integers.
+ if x.Op == GreaterEqualOp {
+ // >=3.4 ==> >=4
+ _, _ = apdCtx.Ceil(&lo, &a.X)
+ } else {
+ // >3.4 ==> >3
+ _, _ = apdCtx.Floor(&lo, &a.X)
+ }
+ if y.Op == LessEqualOp {
+ // <=2.3 ==> <= 2
+ _, _ = apdCtx.Floor(&hi, &b.X)
+ } else {
+ // <2.3 ==> < 3
+ _, _ = apdCtx.Ceil(&hi, &b.X)
+ }
+ }
+
+ cond, err := apd.BaseContext.Sub(&d, &hi, &lo)
+ if cond.Inexact() || err != nil {
+ break
+ }
+
+ // attempt simplification
+ // numbers
+ // >=a & <=b
+ // a if a == b
+ // _|_ if a < b
+ // >=a & a & <=b
+ // _|_ if b <= a
+ // >a & =a & <=b
+ // a if b-a == 0
+ // _|_ if a < b
+ // >=a & a & <=b
+ // b if b-a == 1
+ // _|_ if b <= a
+ // >a & = 0; k-- {
+ if c.stack[k].field == f {
+ break
+ }
+ upCount += c.stack[k].upCount
+ }
+
+ label := &adt.LabelReference{
+ Src: n,
+ UpCount: upCount,
+ }
+
+ switch f := n.Node.(type) {
+ case *ast.Field:
+ _ = c.lookupAlias(k, f.Label.(*ast.Alias).Ident) // mark as used
+ return &adt.DynamicReference{
+ Src: n,
+ UpCount: upCount,
+ Label: label,
+ }
+
+ case *ast.Alias:
+ _ = c.lookupAlias(k, f.Ident) // mark as used
+ return &adt.ValueReference{
+ Src: n,
+ UpCount: upCount,
+ Label: c.label(f.Ident),
+ }
+ }
+ return label
+ }
+
+ upCount := int32(0)
+
+ k := len(c.stack) - 1
+ for ; k >= 0; k-- {
+ if c.stack[k].scope == n.Scope {
+ break
+ }
+ upCount += c.stack[k].upCount
+ }
+ if k < 0 {
+ // This is a programmatic error and should never happen if the users
+ // just builds with the cue command or if astutil.Resolve is used
+ // correctly.
+ c.errf(n, "reference %q set to unknown node in AST; "+
+ "this can result from incorrect API usage or a compiler bug",
+ n.Name)
+ }
+
+ if n.Scope == nil {
+ // Package.
+ // Should have been handled above.
+ return c.errf(n, "unresolved identifier %v", n.Name)
+ }
+
+ switch f := n.Node.(type) {
+ // Local expressions
+ case *ast.LetClause:
+ entry := c.lookupAlias(k, n)
+
+ // let x = y
+ return &adt.LetReference{
+ Src: n,
+ UpCount: upCount,
+ Label: label,
+ X: entry.expr,
+ }
+
+ // TODO: handle new-style aliases
+
+ case *ast.Field:
+ // X=x: y
+ // X=(x): y
+ // X="\(x)": y
+ a, ok := f.Label.(*ast.Alias)
+ if !ok {
+ return c.errf(n, "illegal reference %s", n.Name)
+ }
+ aliasInfo := c.lookupAlias(k, a.Ident) // marks alias as used.
+ lab, ok := a.Expr.(ast.Label)
+ if !ok {
+ return c.errf(a.Expr, "invalid label expression")
+ }
+ name, _, err := ast.LabelName(lab)
+ switch {
+ case errors.Is(err, ast.ErrIsExpression):
+ if aliasInfo.expr == nil {
+ panic("unreachable")
+ }
+ return &adt.DynamicReference{
+ Src: n,
+ UpCount: upCount,
+ Label: aliasInfo.expr,
+ }
+
+ case err != nil:
+ return c.errf(n, "invalid label: %v", err)
+
+ case name != "":
+ label = c.label(lab)
+
+ default:
+ return c.errf(n, "unsupported field alias %q", name)
+ }
+ }
+
+ return &adt.FieldReference{
+ Src: n,
+ UpCount: upCount,
+ Label: label,
+ }
+}
+
+// addDecls compiles the declarations of a struct in three passes: first all
+// aliases are registered, then let declarations are compiled, and finally
+// every declaration is compiled. The pre-registration passes allow forward
+// references between declarations of the same struct.
+func (c *compiler) addDecls(st *adt.StructLit, a []ast.Decl) {
+	for _, d := range a {
+		c.markAlias(d)
+	}
+	for _, d := range a {
+		c.addLetDecl(d)
+	}
+	for _, d := range a {
+		if x := c.decl(d); x != nil {
+			st.Decls = append(st.Decls, x)
+		}
+	}
+}
+
+// markAlias registers aliases declared by d so that later declarations in
+// the same struct can refer to them. Old-style aliases are rejected.
+func (c *compiler) markAlias(d ast.Decl) {
+	switch x := d.(type) {
+	case *ast.Field:
+		lab := x.Label
+		if a, ok := lab.(*ast.Alias); ok {
+			if _, ok = a.Expr.(ast.Label); !ok {
+				c.errf(a, "alias expression is not a valid label")
+			}
+
+			e := aliasEntry{source: a}
+
+			c.insertAlias(a.Ident, e)
+		}
+
+	case *ast.LetClause:
+		a := aliasEntry{
+			label:   (*letScope)(x),
+			srcExpr: x.Expr,
+			source:  x,
+		}
+		c.insertAlias(x.Ident, a)
+
+	case *ast.Alias:
+		c.errf(x, "old-style alias no longer supported: use let clause; use cue fix to update.")
+	}
+}
+
+func (c *compiler) decl(d ast.Decl) adt.Decl {
+ switch x := d.(type) {
+ case *ast.BadDecl:
+ return c.errf(d, "")
+
+ case *ast.Field:
+ lab := x.Label
+ if a, ok := lab.(*ast.Alias); ok {
+ if lab, ok = a.Expr.(ast.Label); !ok {
+ return c.errf(a, "alias expression is not a valid label")
+ }
+
+ switch lab.(type) {
+ case *ast.Ident, *ast.BasicLit, *ast.ListLit:
+ // Even though we won't need the alias, we still register it
+ // for duplicate and failed reference detection.
+ default:
+ c.updateAlias(a.Ident, c.expr(a.Expr))
+ }
+ }
+
+ v := x.Value
+ var value adt.Expr
+ if a, ok := v.(*ast.Alias); ok {
+ c.pushScope(nil, 0, a)
+ c.insertAlias(a.Ident, aliasEntry{source: a})
+ value = c.labeledExpr(x, (*fieldLabel)(x), a.Expr)
+ c.popScope()
+ } else {
+ value = c.labeledExpr(x, (*fieldLabel)(x), v)
+ }
+
+ switch l := lab.(type) {
+ case *ast.Ident, *ast.BasicLit:
+ label := c.label(lab)
+
+ if label == adt.InvalidLabel {
+ return c.errf(x, "cannot use _ as label")
+ }
+
+ // TODO(legacy): remove: old-school definitions
+ if x.Token == token.ISA && !label.IsDef() {
+ name, isIdent, err := ast.LabelName(lab)
+ if err == nil && isIdent {
+ idx := c.index.StringToIndex(name)
+ label, _ = adt.MakeLabel(x, idx, adt.DefinitionLabel)
+ }
+ }
+
+ if x.Optional == token.NoPos {
+ return &adt.Field{
+ Src: x,
+ Label: label,
+ Value: value,
+ }
+ } else {
+ return &adt.OptionalField{
+ Src: x,
+ Label: label,
+ Value: value,
+ }
+ }
+
+ case *ast.ListLit:
+ if len(l.Elts) != 1 {
+ // error
+ return c.errf(x, "list label must have one element")
+ }
+ var label adt.Feature
+ elem := l.Elts[0]
+ // TODO: record alias for error handling? In principle it is okay
+ // to have duplicates, but we do want it to be used.
+ if a, ok := elem.(*ast.Alias); ok {
+ label = c.label(a.Ident)
+ elem = a.Expr
+ }
+
+ return &adt.BulkOptionalField{
+ Src: x,
+ Filter: c.expr(elem),
+ Value: value,
+ Label: label,
+ }
+
+ case *ast.ParenExpr:
+ if x.Token == token.ISA {
+ c.errf(x, "definitions not supported for dynamic fields")
+ }
+ return &adt.DynamicField{
+ Src: x,
+ Key: c.expr(l),
+ Value: value,
+ }
+
+ case *ast.Interpolation:
+ if x.Token == token.ISA {
+ c.errf(x, "definitions not supported for interpolations")
+ }
+ return &adt.DynamicField{
+ Src: x,
+ Key: c.expr(l),
+ Value: value,
+ }
+ }
+
+ // Handled in addLetDecl.
+ case *ast.LetClause:
+ // case: *ast.Alias: // TODO(value alias)
+
+ case *ast.CommentGroup:
+ // Nothing to do for a free-floating comment group.
+
+ case *ast.Attribute:
+ // Nothing to do for now for an attribute declaration.
+
+ case *ast.Ellipsis:
+ return &adt.Ellipsis{
+ Src: x,
+ Value: c.expr(x.Type),
+ }
+
+ case *ast.Comprehension:
+ return c.comprehension(x)
+
+ case *ast.EmbedDecl: // Deprecated
+ return c.expr(x.Expr)
+
+ case ast.Expr:
+ return c.expr(x)
+ }
+ return nil
+}
+
+// addLetDecl compiles the expression of a let clause and records it in the
+// alias map so that all references share the compiled expression.
+func (c *compiler) addLetDecl(d ast.Decl) {
+	switch x := d.(type) {
+	// An alias reference will have an expression that is looked up in the
+	// environment cache.
+	case *ast.LetClause:
+		// Cache the parsed expression. Creating a unique expression for each
+		// reference allows the computation to be shared given that we don't
+		// have fields for expressions. This, in turn, prevents exponential
+		// blowup in x2: x1+x1, x3: x2+x2, ... patterns.
+		expr := c.labeledExpr(nil, (*letScope)(x), x.Expr)
+		c.updateAlias(x.Ident, expr)
+
+	case *ast.Alias:
+		c.errf(x, "old-style alias no longer supported: use let clause; use cue fix to update.")
+	}
+}
+
+// elem compiles a list element, which may be an ellipsis, a comprehension,
+// or a plain expression. It returns nil for a nil input expression.
+func (c *compiler) elem(n ast.Expr) adt.Elem {
+	switch x := n.(type) {
+	case *ast.Ellipsis:
+		return &adt.Ellipsis{
+			Src:   x,
+			Value: c.expr(x.Type),
+		}
+
+	case *ast.Comprehension:
+		return c.comprehension(x)
+
+	case ast.Expr:
+		return c.expr(x)
+	}
+	return nil
+}
+
+func (c *compiler) comprehension(x *ast.Comprehension) adt.Elem {
+ var cur adt.Yielder
+ var first adt.Yielder
+ var prev, next *adt.Yielder
+ for _, v := range x.Clauses {
+ switch x := v.(type) {
+ case *ast.ForClause:
+ var key adt.Feature
+ if x.Key != nil {
+ key = c.label(x.Key)
+ }
+ y := &adt.ForClause{
+ Syntax: x,
+ Key: key,
+ Value: c.label(x.Value),
+ Src: c.expr(x.Source),
+ }
+ cur = y
+ c.pushScope((*forScope)(x), 1, v)
+ defer c.popScope()
+ next = &y.Dst
+
+ case *ast.IfClause:
+ y := &adt.IfClause{
+ Src: x,
+ Condition: c.expr(x.Condition),
+ }
+ cur = y
+ next = &y.Dst
+
+ case *ast.LetClause:
+ y := &adt.LetClause{
+ Src: x,
+ Label: c.label(x.Ident),
+ Expr: c.expr(x.Expr),
+ }
+ cur = y
+ c.pushScope((*letScope)(x), 1, v)
+ defer c.popScope()
+ next = &y.Dst
+ }
+
+ if prev != nil {
+ *prev = cur
+ } else {
+ first = cur
+ if _, ok := cur.(*adt.LetClause); ok {
+ return c.errf(x,
+ "first comprehension clause must be 'if' or 'for'")
+ }
+ }
+ prev = next
+ }
+
+ // TODO: make x.Value an *ast.StructLit and this is redundant.
+ if y, ok := x.Value.(*ast.StructLit); !ok {
+ return c.errf(x.Value,
+ "comprehension value must be struct, found %T", y)
+ }
+
+ y := c.expr(x.Value)
+
+ st, ok := y.(*adt.StructLit)
+ if !ok {
+ // Error must have been generated.
+ return y
+ }
+
+ if prev != nil {
+ *prev = &adt.ValueClause{StructLit: st}
+ } else {
+ return c.errf(x, "comprehension value without clauses")
+ }
+
+ return &adt.Comprehension{
+ Clauses: first,
+ Value: st,
+ }
+}
+
+// labeledExpr compiles expr with lab installed as the label of the topmost
+// scope-stack entry.
+func (c *compiler) labeledExpr(f *ast.Field, lab labeler, expr ast.Expr) adt.Expr {
+	k := len(c.stack) - 1
+	return c.labeledExprAt(k, f, lab, expr)
+}
+
+// labeledExprAt is like labeledExpr but takes an explicit scope-stack
+// position k. The stack entry is saved and restored around the compilation.
+func (c *compiler) labeledExprAt(k int, f *ast.Field, lab labeler, expr ast.Expr) adt.Expr {
+	if c.stack[k].field != nil {
+		panic("expected nil field")
+	}
+	saved := c.stack[k]
+
+	c.stack[k].label = lab
+	c.stack[k].field = f
+
+	value := c.expr(expr)
+
+	c.stack[k] = saved
+	return value
+}
+
+func (c *compiler) expr(expr ast.Expr) adt.Expr {
+ switch n := expr.(type) {
+ case nil:
+ return nil
+ case *ast.Ident:
+ return c.resolve(n)
+
+ case *ast.StructLit:
+ c.pushScope(nil, 1, n)
+ v := &adt.StructLit{Src: n}
+ c.addDecls(v, n.Elts)
+ c.popScope()
+ return v
+
+ case *ast.ListLit:
+ c.pushScope(nil, 1, n)
+ v := &adt.ListLit{Src: n}
+ elts, ellipsis := internal.ListEllipsis(n)
+ for _, d := range elts {
+ elem := c.elem(d)
+
+ switch x := elem.(type) {
+ case nil:
+ case adt.Elem:
+ v.Elems = append(v.Elems, x)
+ default:
+ c.errf(d, "type %T not allowed in ListLit", d)
+ }
+ }
+ if ellipsis != nil {
+ d := &adt.Ellipsis{
+ Src: ellipsis,
+ Value: c.expr(ellipsis.Type),
+ }
+ v.Elems = append(v.Elems, d)
+ }
+ c.popScope()
+ return v
+
+ case *ast.SelectorExpr:
+ c.inSelector++
+ ret := &adt.SelectorExpr{
+ Src: n,
+ X: c.expr(n.X),
+ Sel: c.label(n.Sel)}
+ c.inSelector--
+ return ret
+
+ case *ast.IndexExpr:
+ return &adt.IndexExpr{
+ Src: n,
+ X: c.expr(n.X),
+ Index: c.expr(n.Index),
+ }
+
+ case *ast.SliceExpr:
+ slice := &adt.SliceExpr{Src: n, X: c.expr(n.X)}
+ if n.Low != nil {
+ slice.Lo = c.expr(n.Low)
+ }
+ if n.High != nil {
+ slice.Hi = c.expr(n.High)
+ }
+ return slice
+
+ case *ast.BottomLit:
+ return &adt.Bottom{
+ Src: n,
+ Code: adt.UserError,
+ Err: errors.Newf(n.Pos(), "explicit error (_|_ literal) in source"),
+ }
+
+ case *ast.BadExpr:
+ return c.errf(n, "invalid expression")
+
+ case *ast.BasicLit:
+ return c.parse(n)
+
+ case *ast.Interpolation:
+ if len(n.Elts) == 0 {
+ return c.errf(n, "invalid interpolation")
+ }
+ first, ok1 := n.Elts[0].(*ast.BasicLit)
+ last, ok2 := n.Elts[len(n.Elts)-1].(*ast.BasicLit)
+ if !ok1 || !ok2 {
+ return c.errf(n, "invalid interpolation")
+ }
+ if len(n.Elts) == 1 {
+ return c.expr(n.Elts[0])
+ }
+ lit := &adt.Interpolation{Src: n}
+ info, prefixLen, _, err := literal.ParseQuotes(first.Value, last.Value)
+ if err != nil {
+ return c.errf(n, "invalid interpolation: %v", err)
+ }
+ if info.IsDouble() {
+ lit.K = adt.StringKind
+ } else {
+ lit.K = adt.BytesKind
+ }
+ prefix := ""
+ for i := 0; i < len(n.Elts); i += 2 {
+ l, ok := n.Elts[i].(*ast.BasicLit)
+ if !ok {
+ return c.errf(n, "invalid interpolation")
+ }
+ s := l.Value
+ if !strings.HasPrefix(s, prefix) {
+ return c.errf(l, "invalid interpolation: unmatched ')'")
+ }
+ s = l.Value[prefixLen:]
+ x := parseString(c, l, info, s)
+ lit.Parts = append(lit.Parts, x)
+ if i+1 < len(n.Elts) {
+ lit.Parts = append(lit.Parts, c.expr(n.Elts[i+1]))
+ }
+ prefix = ")"
+ prefixLen = 1
+ }
+ return lit
+
+ case *ast.ParenExpr:
+ return c.expr(n.X)
+
+ case *ast.CallExpr:
+ call := &adt.CallExpr{Src: n, Fun: c.expr(n.Fun)}
+ for _, a := range n.Args {
+ call.Args = append(call.Args, c.expr(a))
+ }
+ return call
+
+ case *ast.UnaryExpr:
+ switch n.Op {
+ case token.NOT, token.ADD, token.SUB:
+ return &adt.UnaryExpr{
+ Src: n,
+ Op: adt.OpFromToken(n.Op),
+ X: c.expr(n.X),
+ }
+ case token.GEQ, token.GTR, token.LSS, token.LEQ,
+ token.NEQ, token.MAT, token.NMAT:
+ return &adt.BoundExpr{
+ Src: n,
+ Op: adt.OpFromToken(n.Op),
+ Expr: c.expr(n.X),
+ }
+
+ case token.MUL:
+ return c.errf(n, "preference mark not allowed at this position")
+ default:
+ return c.errf(n, "unsupported unary operator %q", n.Op)
+ }
+
+ case *ast.BinaryExpr:
+ switch n.Op {
+ case token.OR:
+ d := &adt.DisjunctionExpr{Src: n}
+ c.addDisjunctionElem(d, n.X, false)
+ c.addDisjunctionElem(d, n.Y, false)
+ return d
+
+ default:
+ op := adt.OpFromToken(n.Op)
+ x := c.expr(n.X)
+ y := c.expr(n.Y)
+ if op != adt.AndOp {
+ c.assertConcreteIsPossible(n.X, op, x)
+ c.assertConcreteIsPossible(n.Y, op, y)
+ }
+ // return updateBin(c,
+ return &adt.BinaryExpr{Src: n, Op: op, X: x, Y: y} // )
+ }
+
+ default:
+ return c.errf(n, "%s values not allowed in this position", ast.Name(n))
+ }
+}
+
+// assertConcreteIsPossible reports a compile error when op requires a
+// concrete operand but x can never evaluate to a concrete value.
+//
+// NOTE(review): this function always returns false, even when no error is
+// reported; as used here the result is ignored. Confirm against upstream
+// whether the return value is intentionally dead.
+func (c *compiler) assertConcreteIsPossible(src ast.Node, op adt.Op, x adt.Expr) bool {
+	if !adt.AssertConcreteIsPossible(op, x) {
+		str := astinternal.DebugStr(src)
+		c.errf(src, "invalid operand %s ('%s' requires concrete value)", str, op)
+	}
+	return false
+}
+
+// addDisjunctionElem flattens nested '|' expressions into d.Values,
+// marking elements introduced by a '*' (default) prefix. The mark is
+// propagated to all elements nested under a '*'.
+func (c *compiler) addDisjunctionElem(d *adt.DisjunctionExpr, n ast.Expr, mark bool) {
+	switch x := n.(type) {
+	case *ast.BinaryExpr:
+		if x.Op == token.OR {
+			c.addDisjunctionElem(d, x.X, mark)
+			c.addDisjunctionElem(d, x.Y, mark)
+			return
+		}
+	case *ast.UnaryExpr:
+		if x.Op == token.MUL {
+			d.HasDefaults = true
+			c.addDisjunctionElem(d, x.X, true)
+			return
+		}
+	}
+	d.Values = append(d.Values, adt.Disjunct{Val: c.expr(n), Default: mark})
+}
+
+// TODO(perf): validate that regexps are cached at the right time.
+
+// parse converts a BasicLit (string, number, true/false, null) into the
+// corresponding adt expression, reporting a compile error for malformed
+// literals.
+func (c *compiler) parse(l *ast.BasicLit) (n adt.Expr) {
+	s := l.Value
+	if s == "" {
+		return c.errf(l, "invalid literal %q", s)
+	}
+	switch l.Kind {
+	case token.STRING:
+		info, nStart, _, err := literal.ParseQuotes(s, s)
+		if err != nil {
+			// NOTE(review): err.Error() is used as a format string here; a
+			// literal '%' in the message would be misinterpreted. Consider
+			// c.errf(l, "%s", err) — confirm against upstream.
+			return c.errf(l, err.Error())
+		}
+		s := s[nStart:]
+		return parseString(c, l, info, s)
+
+	case token.FLOAT, token.INT:
+		err := literal.ParseNum(s, &c.num)
+		if err != nil {
+			return c.errf(l, "parse error: %v", err)
+		}
+		kind := adt.FloatKind
+		if c.num.IsInt() {
+			kind = adt.IntKind
+		}
+		n := &adt.Num{Src: l, K: kind}
+		if err = c.num.Decimal(&n.X); err != nil {
+			return c.errf(l, "error converting number to decimal: %v", err)
+		}
+		return n
+
+	case token.TRUE:
+		return &adt.Bool{Src: l, B: true}
+
+	case token.FALSE:
+		return &adt.Bool{Src: l, B: false}
+
+	case token.NULL:
+		return &adt.Null{Src: l}
+
+	default:
+		return c.errf(l, "unknown literal type")
+	}
+}
+
+// parseString decodes a string without the starting and ending quotes.
+// Double-quoted literals yield an adt.String; otherwise the result is
+// adt.Bytes.
+func parseString(c *compiler, node ast.Expr, q literal.QuoteInfo, s string) (n adt.Expr) {
+	str, err := q.Unquote(s)
+	if err != nil {
+		return c.errf(node, "invalid string: %v", err)
+	}
+	if q.IsDouble() {
+		return &adt.String{Src: node, Str: str, RE: nil}
+	}
+	return &adt.Bytes{Src: node, B: []byte(str), RE: nil}
+}
diff --git a/vendor/cuelang.org/go/internal/core/compile/errors.go b/vendor/cuelang.org/go/internal/core/compile/errors.go
new file mode 100644
index 0000000000..fb97622961
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/compile/errors.go
@@ -0,0 +1,48 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compile
+
+import (
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+var _ errors.Error = &compilerError{}
+
+type compilerError struct {
+ n ast.Node
+ path []string
+ errors.Message
+}
+
+func (e *compilerError) Position() token.Pos { return e.n.Pos() }
+func (e *compilerError) InputPositions() []token.Pos { return nil }
+func (e *compilerError) Path() []string { return e.path }
+func (e *compilerError) Error() string {
+ pos := e.n.Pos()
+ // Import cycles deserve special treatment.
+ if pos.IsValid() {
+ // Omit import stack. The full path to the file where the error
+ // is the most important thing.
+ return pos.String() + ": " + e.Message.Error()
+ }
+ if len(e.path) == 0 {
+ return e.Message.Error()
+ }
+ return strings.Join(e.path, ".") + ": " + e.Message.Error()
+}
diff --git a/vendor/cuelang.org/go/internal/core/compile/label.go b/vendor/cuelang.org/go/internal/core/compile/label.go
new file mode 100644
index 0000000000..cd4d5a706c
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/compile/label.go
@@ -0,0 +1,150 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compile
+
+import (
+ "github.com/cockroachdb/apd/v2"
+ "golang.org/x/text/unicode/norm"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// LabelFromNode converts an ADT node to a feature.
+func (c *compiler) label(n ast.Node) adt.Feature {
+ index := c.index
+ switch x := n.(type) {
+ case *ast.Ident:
+ if x.Name == "_" {
+ return adt.InvalidLabel
+ }
+ return adt.MakeIdentLabel(c.index, x.Name, c.pkgPath)
+
+ case *ast.BasicLit:
+ switch x.Kind {
+ case token.STRING:
+ const msg = "invalid string label: %v"
+ s, err := literal.Unquote(x.Value)
+ if err != nil {
+ c.errf(n, msg, err)
+ return adt.InvalidLabel
+ }
+
+ i := int64(index.StringToIndex(norm.NFC.String(s)))
+ f, err := adt.MakeLabel(n, i, adt.StringLabel)
+ if err != nil {
+ c.errf(n, msg, err)
+ }
+ return f
+
+ case token.INT:
+ const msg = "invalid int label: %v"
+ if err := literal.ParseNum(x.Value, &c.num); err != nil {
+ c.errf(n, msg, err)
+ return adt.InvalidLabel
+ }
+
+ var d apd.Decimal
+ if err := c.num.Decimal(&d); err != nil {
+ c.errf(n, msg, err)
+ return adt.InvalidLabel
+ }
+
+ i, err := d.Int64()
+ if err != nil {
+ c.errf(n, msg, err)
+ return adt.InvalidLabel
+ }
+
+ f, err := adt.MakeLabel(n, i, adt.IntLabel)
+ if err != nil {
+ c.errf(n, msg, err)
+ return adt.InvalidLabel
+ }
+ return f
+
+ case token.FLOAT:
+ _ = c.errf(n, "float %s cannot be used as label", x.Value)
+ return adt.InvalidLabel
+
+ default: // keywords (null, true, false, for, in, if, let)
+ i := index.StringToIndex(x.Kind.String())
+ f, err := adt.MakeLabel(n, i, adt.StringLabel)
+ if err != nil {
+ c.errf(n, "invalid string label: %v", err)
+ }
+ return f
+ }
+
+ default:
+ c.errf(n, "unsupported label node type %T", n)
+ return adt.InvalidLabel
+ }
+}
+
+// A labeler converts an AST node to a string representation.
+type labeler interface {
+ labelString() string
+}
+
+type fieldLabel ast.Field
+
+func (l *fieldLabel) labelString() string {
+ lab := l.Label
+
+ if a, ok := lab.(*ast.Alias); ok {
+ if x, _ := a.Expr.(ast.Label); x != nil {
+ lab = x
+ }
+ }
+
+ switch x := lab.(type) {
+ case *ast.Ident:
+ return x.Name
+
+ case *ast.BasicLit:
+ if x.Kind == token.STRING {
+ s, err := literal.Unquote(x.Value)
+ if err == nil && ast.IsValidIdent(s) {
+ return s
+ }
+ }
+ return x.Value
+
+ case *ast.ListLit:
+ return "[]" // TODO: more detail
+
+ case *ast.Interpolation:
+ return "?"
+ // case *ast.ParenExpr:
+ }
+ return ""
+}
+
+type forScope ast.ForClause
+
+func (l *forScope) labelString() string {
+ // TODO: include more info in square brackets.
+ return "for[]"
+}
+
+type letScope ast.LetClause
+
+func (l *letScope) labelString() string {
+ // TODO: include more info in square brackets.
+ return "let[]"
+}
diff --git a/vendor/cuelang.org/go/internal/core/compile/predeclared.go b/vendor/cuelang.org/go/internal/core/compile/predeclared.go
new file mode 100644
index 0000000000..6345147ac9
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/compile/predeclared.go
@@ -0,0 +1,169 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compile
+
+import (
+ "strconv"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func predeclared(n *ast.Ident) adt.Expr {
+ // TODO: consider supporting GraphQL-style names:
+ // String, Bytes, Boolean, Integer, Number.
+ // These names will not conflict with idiomatic camel-case JSON.
+ switch n.Name {
+ case "string", "__string":
+ return &adt.BasicType{Src: n, K: adt.StringKind}
+ case "bytes", "__bytes":
+ return &adt.BasicType{Src: n, K: adt.BytesKind}
+ case "bool", "__bool":
+ return &adt.BasicType{Src: n, K: adt.BoolKind}
+ case "int", "__int":
+ return &adt.BasicType{Src: n, K: adt.IntKind}
+ case "float", "__float":
+ return &adt.BasicType{Src: n, K: adt.FloatKind}
+ case "number", "__number":
+ return &adt.BasicType{Src: n, K: adt.NumKind}
+
+ case "len", "__len":
+ return lenBuiltin
+ case "close", "__close":
+ return closeBuiltin
+ case "and", "__and":
+ return andBuiltin
+ case "or", "__or":
+ return orBuiltin
+ case "div", "__div":
+ return divBuiltin
+ case "mod", "__mod":
+ return modBuiltin
+ case "quo", "__quo":
+ return quoBuiltin
+ case "rem", "__rem":
+ return remBuiltin
+ }
+
+ if r, ok := predefinedRanges[n.Name]; ok {
+ return r
+ }
+
+ return nil
+}
+
+// LookupRange returns a CUE expressions for the given predeclared identifier
+// representing a range, such as uint8, int128, and float64.
+func LookupRange(name string) adt.Expr {
+ return predefinedRanges[name]
+}
+
+var predefinedRanges = map[string]adt.Expr{
+ "rune": mkIntRange("0", strconv.Itoa(0x10FFFF)),
+ "int8": mkIntRange("-128", "127"),
+ "int16": mkIntRange("-32768", "32767"),
+ "int32": mkIntRange("-2147483648", "2147483647"),
+ "int64": mkIntRange("-9223372036854775808", "9223372036854775807"),
+ "int128": mkIntRange(
+ "-170141183460469231731687303715884105728",
+ "170141183460469231731687303715884105727"),
+
+ // Do not include an alias for "byte", as it would be too easily confused
+ // with the builtin "bytes".
+ "uint": mkUint(),
+ "uint8": mkIntRange("0", "255"),
+ "uint16": mkIntRange("0", "65535"),
+ "uint32": mkIntRange("0", "4294967295"),
+ "uint64": mkIntRange("0", "18446744073709551615"),
+ "uint128": mkIntRange("0", "340282366920938463463374607431768211455"),
+
+ // 2**127 * (2**24 - 1) / 2**23
+ "float32": mkFloatRange(
+ "-3.40282346638528859811704183484516925440e+38",
+ "3.40282346638528859811704183484516925440e+38",
+ ),
+ // 2**1023 * (2**53 - 1) / 2**52
+ "float64": mkFloatRange(
+ "-1.797693134862315708145274237317043567981e+308",
+ "1.797693134862315708145274237317043567981e+308",
+ ),
+}
+
+func init() {
+ for k, v := range predefinedRanges {
+ predefinedRanges["__"+k] = v
+ }
+}
+
+// TODO: use an adt.BoundValue here. and conjunctions here.
+
+func mkUint() adt.Expr {
+ from := newBound(adt.GreaterEqualOp, adt.IntKind, parseInt("0"))
+ ident := ast.NewIdent("__int")
+ src := ast.NewBinExpr(token.AND, ident, from.Src)
+ return &adt.Conjunction{
+ Src: src,
+ Values: []adt.Value{
+ &adt.BasicType{Src: ident, K: adt.IntKind}, from,
+ },
+ }
+}
+
+func mkIntRange(a, b string) adt.Expr {
+ from := newBound(adt.GreaterEqualOp, adt.IntKind, parseInt(a))
+ to := newBound(adt.LessEqualOp, adt.IntKind, parseInt(b))
+ ident := ast.NewIdent("__int")
+ src := ast.NewBinExpr(token.AND, ident, from.Src, to.Src)
+ return &adt.Conjunction{
+ Src: src,
+ Values: []adt.Value{
+ &adt.BasicType{Src: ident, K: adt.IntKind}, from, to,
+ },
+ }
+}
+
+func mkFloatRange(a, b string) adt.Expr {
+ from := newBound(adt.GreaterEqualOp, adt.NumKind, parseFloat(a))
+ to := newBound(adt.LessEqualOp, adt.NumKind, parseFloat(b))
+ src := ast.NewBinExpr(token.AND, from.Src, to.Src)
+ return &adt.Conjunction{Src: src, Values: []adt.Value{from, to}}
+}
+
+func newBound(op adt.Op, k adt.Kind, v adt.Value) *adt.BoundValue {
+ src := &ast.UnaryExpr{Op: op.Token(), X: v.Source().(ast.Expr)}
+ return &adt.BoundValue{Src: src, Op: op, Value: v}
+}
+
+func parseInt(s string) *adt.Num {
+ n := parseNum(adt.IntKind, s)
+ n.Src = &ast.BasicLit{Kind: token.INT, Value: s}
+ return n
+}
+
+func parseFloat(s string) *adt.Num {
+ n := parseNum(adt.FloatKind, s)
+ n.Src = &ast.BasicLit{Kind: token.FLOAT, Value: s}
+ return n
+}
+
+func parseNum(k adt.Kind, s string) *adt.Num {
+ num := &adt.Num{K: k}
+ _, _, err := num.X.SetString(s)
+ if err != nil {
+ panic(err)
+ }
+ return num
+}
diff --git a/vendor/cuelang.org/go/internal/core/convert/go.go b/vendor/cuelang.org/go/internal/core/convert/go.go
new file mode 100644
index 0000000000..b6cc20db25
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/convert/go.go
@@ -0,0 +1,817 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package convert allows converting to and from Go values and Types.
+package convert
+
+import (
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/cockroachdb/apd/v2"
+ "golang.org/x/text/encoding/unicode"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/compile"
+ "cuelang.org/go/internal/types"
+)
+
+// This file contains functionality for converting Go to CUE.
+//
+// The code in this file is a prototype implementation and is far from
+// optimized.
+
+func GoValueToValue(ctx *adt.OpContext, x interface{}, nilIsTop bool) adt.Value {
+ v := GoValueToExpr(ctx, nilIsTop, x)
+ // TODO: return Value
+ return toValue(v)
+}
+
+func GoTypeToExpr(ctx *adt.OpContext, x interface{}) (adt.Expr, errors.Error) {
+ v := convertGoType(ctx, reflect.TypeOf(x))
+ if err := ctx.Err(); err != nil {
+ return v, err.Err
+ }
+ return v, nil
+}
+
+func toValue(e adt.Expr) adt.Value {
+ if v, ok := e.(adt.Value); ok {
+ return v
+ }
+ obj := &adt.Vertex{}
+ obj.AddConjunct(adt.MakeRootConjunct(nil, e))
+ return obj
+}
+
+func compileExpr(ctx *adt.OpContext, expr ast.Expr) adt.Value {
+ c, err := compile.Expr(nil, ctx, pkgID(), expr)
+ if err != nil {
+ return &adt.Bottom{Err: errors.Promote(err, "compile")}
+ }
+ return adt.Resolve(ctx, c)
+}
+
+// parseTag parses a CUE expression from a cue tag.
+func parseTag(ctx *adt.OpContext, obj *ast.StructLit, field, tag string) ast.Expr {
+ if p := strings.Index(tag, ","); p >= 0 {
+ tag = tag[:p]
+ }
+ if tag == "" {
+ return topSentinel
+ }
+ expr, err := parser.ParseExpr("", tag)
+ if err != nil {
+ err := errors.Promote(err, "parser")
+ ctx.AddErr(errors.Wrapf(err, ctx.Pos(),
+ "invalid tag %q for field %q", tag, field))
+ return &ast.BadExpr{}
+ }
+ return expr
+}
+
+// TODO: should we allow mapping names in cue tags? This only seems like a good
+// idea if we ever want to allow mapping CUE to a different name than JSON.
+var tagsWithNames = []string{"json", "yaml", "protobuf"}
+
+func getName(f *reflect.StructField) string {
+ name := f.Name
+ if f.Anonymous {
+ name = ""
+ }
+ for _, s := range tagsWithNames {
+ if tag, ok := f.Tag.Lookup(s); ok {
+ if p := strings.Index(tag, ","); p >= 0 {
+ tag = tag[:p]
+ }
+ if tag != "" {
+ name = tag
+ break
+ }
+ }
+ }
+ return name
+}
+
+// isOptional indicates whether a field should be marked as optional.
+func isOptional(f *reflect.StructField) bool {
+ isOptional := false
+ switch f.Type.Kind() {
+ case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice:
+ // Note: it may be confusing to distinguish between an empty slice and
+ // a nil slice. However, it is also surprising to not be able to specify
+ // a default value for a slice. So for now we will allow it.
+ isOptional = true
+ }
+ if tag, ok := f.Tag.Lookup("cue"); ok {
+ // TODO: only if first field is not empty.
+ isOptional = false
+ for _, f := range strings.Split(tag, ",")[1:] {
+ switch f {
+ case "opt":
+ isOptional = true
+ case "req":
+ return false
+ }
+ }
+ } else if tag, ok = f.Tag.Lookup("json"); ok {
+ isOptional = false
+ for _, f := range strings.Split(tag, ",")[1:] {
+ if f == "omitempty" {
+ return true
+ }
+ }
+ }
+ return isOptional
+}
+
+// isOmitEmpty means that the zero value is interpreted as undefined.
+func isOmitEmpty(f *reflect.StructField) bool {
+ isOmitEmpty := false
+ switch f.Type.Kind() {
+ case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice:
+ // Note: it may be confusing to distinguish between an empty slice and
+ // a nil slice. However, it is also surprising to not be able to specify
+ // a default value for a slice. So for now we will allow it.
+ isOmitEmpty = true
+
+ default:
+ // TODO: we can also infer omit empty if a type cannot be nil if there
+ // is a constraint that unconditionally disallows the zero value.
+ }
+ tag, ok := f.Tag.Lookup("json")
+ if ok {
+ isOmitEmpty = false
+ for _, f := range strings.Split(tag, ",")[1:] {
+ if f == "omitempty" {
+ return true
+ }
+ }
+ }
+ return isOmitEmpty
+}
+
+// parseJSON parses JSON into a CUE value. b must be valid JSON.
+func parseJSON(ctx *adt.OpContext, b []byte) adt.Value {
+ expr, err := parser.ParseExpr("json", b)
+ if err != nil {
+ panic(err) // cannot happen
+ }
+ return compileExpr(ctx, expr)
+}
+
+func isZero(v reflect.Value) bool {
+ x := v.Interface()
+ if x == nil {
+ return true
+ }
+ switch k := v.Kind(); k {
+ case reflect.Struct, reflect.Array:
+ // we never allow optional values for these types.
+ return false
+
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
+ reflect.Slice:
+ // Note that for maps we preserve the distinction between a nil map and
+ // an empty map.
+ return v.IsNil()
+
+ case reflect.String:
+ return v.Len() == 0
+
+ default:
+ return x == reflect.Zero(v.Type()).Interface()
+ }
+}
+
+func GoValueToExpr(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Expr {
+ e := convertRec(ctx, nilIsTop, x)
+ if e == nil {
+ return ctx.AddErrf("unsupported Go type (%T)", x)
+ }
+ return e
+}
+
+func isNil(x reflect.Value) bool {
+ switch x.Kind() {
+ // Only check for supported types; ignore func and chan.
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface:
+ return x.IsNil()
+ }
+ return false
+}
+
+func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value {
+ if t := (&types.Value{}); types.CastValue(t, x) {
+ // TODO: panic if nto the same runtime.
+ return t.V
+ }
+ src := ctx.Source()
+ switch v := x.(type) {
+ case nil:
+ if nilIsTop {
+ ident, _ := ctx.Source().(*ast.Ident)
+ return &adt.Top{Src: ident}
+ }
+ return &adt.Null{Src: ctx.Source()}
+
+ case *ast.File:
+ x, err := compile.Files(nil, ctx, pkgID(), v)
+ if err != nil {
+ return &adt.Bottom{Err: errors.Promote(err, "compile")}
+ }
+ if len(x.Conjuncts) != 1 {
+ panic("unexpected length")
+ }
+ return x
+
+ case ast.Expr:
+ return compileExpr(ctx, v)
+
+ case *big.Int:
+ return &adt.Num{Src: src, K: adt.IntKind, X: *apd.NewWithBigInt(v, 0)}
+
+ case *big.Rat:
+ // should we represent this as a binary operation?
+ n := &adt.Num{Src: src, K: adt.IntKind}
+ _, err := apd.BaseContext.Quo(&n.X, apd.NewWithBigInt(v.Num(), 0), apd.NewWithBigInt(v.Denom(), 0))
+ if err != nil {
+ return ctx.AddErrf("could not convert *big.Rat: %v", err)
+ }
+ if !v.IsInt() {
+ n.K = adt.FloatKind
+ }
+ return n
+
+ case *big.Float:
+ n := &adt.Num{Src: src, K: adt.FloatKind}
+ _, _, err := n.X.SetString(v.String())
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "invalid float"))
+ }
+ return n
+
+ case *apd.Decimal:
+ // TODO: should we allow an "int" bit to be set here? It is a bit
+ // tricky, as we would also need to pass down the result of rounding.
+ // So more likely an API must return explicitly whether a value is
+ // a float or an int after all.
+ // The code to autodetect whether something is an integer can be done
+ // with this:
+ kind := adt.FloatKind
+ var d apd.Decimal
+ res, _ := apd.BaseContext.RoundToIntegralExact(&d, v)
+ if !res.Inexact() {
+ kind = adt.IntKind
+ }
+ n := &adt.Num{Src: ctx.Source(), K: kind}
+ n.X = *v
+ return n
+
+ case json.Marshaler:
+ b, err := v.MarshalJSON()
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "json.Marshaler"))
+ }
+
+ return parseJSON(ctx, b)
+
+ case encoding.TextMarshaler:
+ b, err := v.MarshalText()
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "encoding.TextMarshaler"))
+ }
+ b, err = json.Marshal(string(b))
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "json"))
+ }
+ return parseJSON(ctx, b)
+
+ case error:
+ var errs errors.Error
+ switch x := v.(type) {
+ case errors.Error:
+ errs = x
+ default:
+ errs = ctx.Newf("%s", x.Error())
+ }
+ return &adt.Bottom{Err: errs}
+ case bool:
+ return &adt.Bool{Src: ctx.Source(), B: v}
+ case string:
+ s, _ := unicode.UTF8.NewEncoder().String(v)
+ return &adt.String{Src: ctx.Source(), Str: s}
+ case []byte:
+ return &adt.Bytes{Src: ctx.Source(), B: v}
+ case int:
+ return toInt(ctx, int64(v))
+ case int8:
+ return toInt(ctx, int64(v))
+ case int16:
+ return toInt(ctx, int64(v))
+ case int32:
+ return toInt(ctx, int64(v))
+ case int64:
+ return toInt(ctx, int64(v))
+ case uint:
+ return toUint(ctx, uint64(v))
+ case uint8:
+ return toUint(ctx, uint64(v))
+ case uint16:
+ return toUint(ctx, uint64(v))
+ case uint32:
+ return toUint(ctx, uint64(v))
+ case uint64:
+ return toUint(ctx, uint64(v))
+ case uintptr:
+ return toUint(ctx, uint64(v))
+ case float64:
+ n := &adt.Num{Src: src, K: adt.FloatKind}
+ _, _, err := n.X.SetString(fmt.Sprint(v))
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "invalid float"))
+ }
+ return n
+ case float32:
+ n := &adt.Num{Src: src, K: adt.FloatKind}
+ _, _, err := n.X.SetString(fmt.Sprint(v))
+ if err != nil {
+ return ctx.AddErr(errors.Promote(err, "invalid float"))
+ }
+ return n
+
+ case reflect.Value:
+ if v.CanInterface() {
+ return convertRec(ctx, nilIsTop, v.Interface())
+ }
+
+ default:
+ value := reflect.ValueOf(v)
+ switch value.Kind() {
+ case reflect.Bool:
+ return &adt.Bool{Src: ctx.Source(), B: value.Bool()}
+
+ case reflect.String:
+ str := value.String()
+ str, _ = unicode.UTF8.NewEncoder().String(str)
+ // TODO: here and above: allow to fail on invalid strings.
+ // if !utf8.ValidString(str) {
+ // return ctx.AddErrf("cannot convert result to string: invalid UTF-8")
+ // }
+ return &adt.String{Src: ctx.Source(), Str: str}
+
+ case reflect.Int, reflect.Int8, reflect.Int16,
+ reflect.Int32, reflect.Int64:
+ return toInt(ctx, value.Int())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return toUint(ctx, value.Uint())
+
+ case reflect.Float32, reflect.Float64:
+ return convertRec(ctx, nilIsTop, value.Float())
+
+ case reflect.Ptr:
+ if value.IsNil() {
+ if nilIsTop {
+ ident, _ := ctx.Source().(*ast.Ident)
+ return &adt.Top{Src: ident}
+ }
+ return &adt.Null{Src: ctx.Source()}
+ }
+ return convertRec(ctx, nilIsTop, value.Elem().Interface())
+
+ case reflect.Struct:
+ obj := &adt.StructLit{Src: src}
+ v := &adt.Vertex{}
+ env := ctx.Env(0)
+ if env == nil {
+ env = &adt.Environment{}
+ }
+ v.AddStruct(obj, env, adt.CloseInfo{})
+ v.SetValue(ctx, adt.Finalized, &adt.StructMarker{})
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ sf := t.Field(i)
+ if sf.PkgPath != "" {
+ continue
+ }
+ val := value.Field(i)
+ if !nilIsTop && isNil(val) {
+ continue
+ }
+ if tag, _ := sf.Tag.Lookup("json"); tag == "-" {
+ continue
+ }
+ if isOmitEmpty(&sf) && isZero(val) {
+ continue
+ }
+ sub := convertRec(ctx, nilIsTop, val.Interface())
+ if sub == nil {
+ // mimic behavior of encoding/json: skip fields of unsupported types
+ continue
+ }
+ if _, ok := sub.(*adt.Bottom); ok {
+ return sub
+ }
+
+ // leave errors like we do during normal evaluation or do we
+ // want to return the error?
+ name := getName(&sf)
+ if name == "-" {
+ continue
+ }
+ if sf.Anonymous && name == "" {
+ arc, ok := sub.(*adt.Vertex)
+ if ok {
+ v.Arcs = append(v.Arcs, arc.Arcs...)
+ }
+ continue
+ }
+
+ f := ctx.StringLabel(name)
+ obj.Decls = append(obj.Decls, &adt.Field{Label: f, Value: sub})
+ arc, ok := sub.(*adt.Vertex)
+ if ok {
+ a := *arc
+ arc = &a
+ arc.Label = f
+ } else {
+ arc = &adt.Vertex{Label: f, BaseValue: sub}
+ arc.UpdateStatus(adt.Finalized)
+ arc.AddConjunct(adt.MakeRootConjunct(nil, sub))
+ }
+ v.Arcs = append(v.Arcs, arc)
+ }
+
+ return v
+
+ case reflect.Map:
+ v := &adt.Vertex{BaseValue: &adt.StructMarker{}}
+ v.SetValue(ctx, adt.Finalized, &adt.StructMarker{})
+
+ t := value.Type()
+ switch key := t.Key(); key.Kind() {
+ default:
+ if !key.Implements(textMarshaler) {
+ return ctx.AddErrf("unsupported Go type for map key (%v)", key)
+ }
+ fallthrough
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16,
+ reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ keys := value.MapKeys()
+ sort.Slice(keys, func(i, j int) bool {
+ return fmt.Sprint(keys[i]) < fmt.Sprint(keys[j])
+ })
+ for _, k := range keys {
+ val := value.MapIndex(k)
+ // if isNil(val) {
+ // continue
+ // }
+
+ sub := convertRec(ctx, nilIsTop, val.Interface())
+ // mimic behavior of encoding/json: report error of
+ // unsupported type.
+ if sub == nil {
+ return ctx.AddErrf("unsupported Go type (%T)", val.Interface())
+ }
+ if isBottom(sub) {
+ return sub
+ }
+
+ s := fmt.Sprint(k)
+ f := ctx.StringLabel(s)
+ arc, ok := sub.(*adt.Vertex)
+ if ok {
+ a := *arc
+ arc = &a
+ arc.Label = f
+ } else {
+ arc = &adt.Vertex{Label: f, BaseValue: sub}
+ arc.UpdateStatus(adt.Finalized)
+ arc.AddConjunct(adt.MakeRootConjunct(nil, sub))
+ }
+ v.Arcs = append(v.Arcs, arc)
+ }
+ }
+
+ return v
+
+ case reflect.Slice, reflect.Array:
+ var values []adt.Value
+
+ for i := 0; i < value.Len(); i++ {
+ val := value.Index(i)
+ x := convertRec(ctx, nilIsTop, val.Interface())
+ if x == nil {
+ return ctx.AddErrf("unsupported Go type (%T)",
+ val.Interface())
+ }
+ if isBottom(x) {
+ return x
+ }
+ values = append(values, x)
+ }
+
+ return ctx.NewList(values...)
+ }
+ }
+ return nil
+}
+
+func toInt(ctx *adt.OpContext, x int64) adt.Value {
+ n := &adt.Num{Src: ctx.Source(), K: adt.IntKind}
+ n.X = *apd.New(x, 0)
+ return n
+}
+
+func toUint(ctx *adt.OpContext, x uint64) adt.Value {
+ n := &adt.Num{Src: ctx.Source(), K: adt.IntKind}
+ n.X.Coeff.SetUint64(x)
+ return n
+}
+
+func convertGoType(ctx *adt.OpContext, t reflect.Type) adt.Expr {
+ // TODO: this can be much more efficient.
+ // TODO: synchronize
+ return goTypeToValue(ctx, true, t)
+}
+
+var (
+ jsonMarshaler = reflect.TypeOf(new(json.Marshaler)).Elem()
+ textMarshaler = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+ topSentinel = ast.NewIdent("_")
+)
+
+// goTypeToValue converts a Go Type to a value.
+//
+// TODO: if this value will always be unified with a concrete type in Go, then
+// many of the fields may be omitted.
+func goTypeToValue(ctx *adt.OpContext, allowNullDefault bool, t reflect.Type) adt.Expr {
+ if _, t, ok := ctx.LoadType(t); ok {
+ return t
+ }
+
+ _, v := goTypeToValueRec(ctx, allowNullDefault, t)
+ if v == nil {
+ return ctx.AddErrf("unsupported Go type (%v)", t)
+ }
+ return v
+}
+
+func goTypeToValueRec(ctx *adt.OpContext, allowNullDefault bool, t reflect.Type) (e ast.Expr, expr adt.Expr) {
+ if src, t, ok := ctx.LoadType(t); ok {
+ return src, t
+ }
+
+ switch reflect.Zero(t).Interface().(type) {
+ case *big.Int, big.Int:
+ e = ast.NewIdent("int")
+ goto store
+
+ case *big.Float, big.Float, *big.Rat, big.Rat:
+ e = ast.NewIdent("number")
+ goto store
+
+ case *apd.Decimal, apd.Decimal:
+ e = ast.NewIdent("number")
+ goto store
+ }
+
+ // Even if this is for types that we know cast to a certain type, it can't
+ // hurt to return top, as in these cases the concrete values will be
+ // strict instances and there cannot be any tags that further constrain
+ // the values.
+ if t.Implements(jsonMarshaler) || t.Implements(textMarshaler) {
+ return topSentinel, nil
+ }
+
+ switch k := t.Kind(); k {
+ case reflect.Ptr:
+ elem := t.Elem()
+ for elem.Kind() == reflect.Ptr {
+ elem = elem.Elem()
+ }
+ e, _ = goTypeToValueRec(ctx, false, elem)
+ if allowNullDefault {
+ e = wrapOrNull(e)
+ }
+
+ case reflect.Interface:
+ switch t.Name() {
+ case "error":
+ // This is really null | _|_. There is no error if the error is null.
+ e = ast.NewNull()
+ default:
+ e = topSentinel // `_`
+ }
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ e = compile.LookupRange(t.Kind().String()).Source().(ast.Expr)
+
+ case reflect.Uint, reflect.Uintptr:
+ e = compile.LookupRange("uint64").Source().(ast.Expr)
+
+ case reflect.Int:
+ e = compile.LookupRange("int64").Source().(ast.Expr)
+
+ case reflect.String:
+ e = ast.NewIdent("__string")
+
+ case reflect.Bool:
+ e = ast.NewIdent("__bool")
+
+ case reflect.Float32, reflect.Float64:
+ e = ast.NewIdent("__number")
+
+ case reflect.Struct:
+ obj := &ast.StructLit{}
+
+ // TODO: dirty trick: set this to a temporary Vertex and then update the
+ // arcs and conjuncts of this vertex below. This will allow circular
+ // references. Maybe have a special kind of "hardlink" reference.
+ ctx.StoreType(t, obj, nil)
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.PkgPath != "" {
+ continue
+ }
+ _, ok := f.Tag.Lookup("cue")
+ elem, _ := goTypeToValueRec(ctx, !ok, f.Type)
+ if isBad(elem) {
+ continue // Ignore fields for unsupported types
+ }
+
+ // leave errors like we do during normal evaluation or do we
+ // want to return the error?
+ name := getName(&f)
+ if name == "-" {
+ continue
+ }
+
+ if tag, ok := f.Tag.Lookup("cue"); ok {
+ v := parseTag(ctx, obj, name, tag)
+ if isBad(v) {
+ return v, nil
+ }
+ elem = ast.NewBinExpr(token.AND, elem, v)
+ }
+ // TODO: if an identifier starts with __ (or otherwise is not a
+ // valid CUE name), make it a string and create a map to a new
+ // name for references.
+
+ // The GO JSON decoder always allows a value to be undefined.
+ d := &ast.Field{Label: ast.NewIdent(name), Value: elem}
+ if isOptional(&f) {
+ d.Optional = token.Blank.Pos()
+ }
+ obj.Elts = append(obj.Elts, d)
+ }
+
+ // TODO: should we validate references here? Can be done using
+ // astutil.ToFile and astutil.Resolve.
+
+ e = obj
+
+ case reflect.Array, reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ e = ast.NewIdent("__bytes")
+ } else {
+ elem, _ := goTypeToValueRec(ctx, allowNullDefault, t.Elem())
+ if elem == nil {
+ b := ctx.AddErrf("unsupported Go type (%v)", t.Elem())
+ return &ast.BadExpr{}, b
+ }
+
+ if t.Kind() == reflect.Array {
+ e = ast.NewBinExpr(token.MUL,
+ ast.NewLit(token.INT, strconv.Itoa(t.Len())),
+ ast.NewList(elem))
+ } else {
+ e = ast.NewList(&ast.Ellipsis{Type: elem})
+ }
+ }
+ if k == reflect.Slice {
+ e = wrapOrNull(e)
+ }
+
+ case reflect.Map:
+ switch key := t.Key(); key.Kind() {
+ case reflect.String, reflect.Int, reflect.Int8, reflect.Int16,
+ reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8,
+ reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ default:
+ b := ctx.AddErrf("unsupported Go type for map key (%v)", key)
+ return &ast.BadExpr{}, b
+ }
+
+ v, x := goTypeToValueRec(ctx, allowNullDefault, t.Elem())
+ if v == nil {
+ b := ctx.AddErrf("unsupported Go type (%v)", t.Elem())
+ return &ast.BadExpr{}, b
+ }
+ if isBad(v) {
+ return v, x
+ }
+
+ e = ast.NewStruct(&ast.Field{
+ Label: ast.NewList(ast.NewIdent("__string")),
+ Value: v,
+ })
+
+ e = wrapOrNull(e)
+ }
+
+store:
+ // TODO: store error if not nil?
+ if e != nil {
+ f := &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: e}}}
+ astutil.Resolve(f, func(_ token.Pos, msg string, args ...interface{}) {
+ ctx.AddErrf(msg, args...)
+ })
+ var x adt.Expr
+ c, err := compile.Expr(nil, ctx, pkgID(), e)
+ if err != nil {
+ b := &adt.Bottom{Err: err}
+ ctx.AddBottom(b)
+ x = b
+ } else {
+ x = c.Expr()
+ }
+ ctx.StoreType(t, e, x)
+ return e, x
+ }
+ return e, nil
+}
+
+func isBottom(x adt.Node) bool {
+ if x == nil {
+ return true
+ }
+ b, _ := x.(*adt.Bottom)
+ return b != nil
+}
+
+func isBad(x ast.Expr) bool {
+ if x == nil {
+ return true
+ }
+ if bad, _ := x.(*ast.BadExpr); bad != nil {
+ return true
+ }
+ return false
+}
+
+func wrapOrNull(e ast.Expr) ast.Expr {
+ switch x := e.(type) {
+ case *ast.BasicLit:
+ if x.Kind == token.NULL {
+ return x
+ }
+ case *ast.BadExpr:
+ return e
+ }
+ return makeNullable(e, true)
+}
+
+func makeNullable(e ast.Expr, nullIsDefault bool) ast.Expr {
+ var null ast.Expr = ast.NewNull()
+ if nullIsDefault {
+ null = &ast.UnaryExpr{Op: token.MUL, X: null}
+ }
+ return ast.NewBinExpr(token.OR, null, e)
+}
+
+// pkgID returns a package path that can never resolve to an existing package.
+func pkgID() string {
+ return "_"
+}
diff --git a/vendor/cuelang.org/go/internal/core/debug/compact.go b/vendor/cuelang.org/go/internal/core/debug/compact.go
new file mode 100644
index 0000000000..c0069f8b52
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/debug/compact.go
@@ -0,0 +1,337 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug prints a given ADT node.
+//
+// Note that the result is not valid CUE, but instead prints the internals
+// of an ADT node in human-readable form. It uses a simple indentation algorithm
+// for improved readability and diffing.
+//
+package debug
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/internal/core/adt"
+)
+
+type compactPrinter struct {
+ printer
+}
+
+func (w *compactPrinter) node(n adt.Node) {
+ switch x := n.(type) {
+ case *adt.Vertex:
+ if x.BaseValue == nil || (w.cfg.Raw && !x.IsData()) {
+ for i, c := range x.Conjuncts {
+ if i > 0 {
+ w.string(" & ")
+ }
+ w.node(c.Elem())
+ }
+ return
+ }
+
+ switch v := x.BaseValue.(type) {
+ case *adt.StructMarker:
+ w.string("{")
+ for i, a := range x.Arcs {
+ if i > 0 {
+ w.string(",")
+ }
+ w.label(a.Label)
+ w.string(":")
+ w.node(a)
+ }
+ w.string("}")
+
+ case *adt.ListMarker:
+ w.string("[")
+ for i, a := range x.Arcs {
+ if i > 0 {
+ w.string(",")
+ }
+ w.node(a)
+ }
+ w.string("]")
+
+ case adt.Value:
+ w.node(v)
+ }
+
+ case *adt.StructMarker:
+ w.string("struct")
+
+ case *adt.ListMarker:
+ w.string("list")
+
+ case *adt.StructLit:
+ w.string("{")
+ for i, d := range x.Decls {
+ if i > 0 {
+ w.string(",")
+ }
+ w.node(d)
+ }
+ w.string("}")
+
+ case *adt.ListLit:
+ w.string("[")
+ for i, d := range x.Elems {
+ if i > 0 {
+ w.string(",")
+ }
+ w.node(d)
+ }
+ w.string("]")
+
+ case *adt.Field:
+ s := w.labelString(x.Label)
+ w.string(s)
+ w.string(":")
+ w.node(x.Value)
+
+ case *adt.OptionalField:
+ s := w.labelString(x.Label)
+ w.string(s)
+ w.string("?:")
+ w.node(x.Value)
+
+ case *adt.BulkOptionalField:
+ w.string("[")
+ w.node(x.Filter)
+ w.string("]:")
+ w.node(x.Value)
+
+ case *adt.DynamicField:
+ w.node(x.Key)
+ if x.IsOptional() {
+ w.string("?")
+ }
+ w.string(":")
+ w.node(x.Value)
+
+ case *adt.Ellipsis:
+ w.string("...")
+ if x.Value != nil {
+ w.node(x.Value)
+ }
+
+ case *adt.Bottom:
+ w.string(`_|_`)
+ if x.Err != nil {
+ w.string("(")
+ w.string(x.Err.Error())
+ w.string(")")
+ }
+
+ case *adt.Null:
+ w.string("null")
+
+ case *adt.Bool:
+ fmt.Fprint(w, x.B)
+
+ case *adt.Num:
+ fmt.Fprint(w, &x.X)
+
+ case *adt.String:
+ w.string(literal.String.Quote(x.Str))
+
+ case *adt.Bytes:
+ w.string(literal.Bytes.Quote(string(x.B)))
+
+ case *adt.Top:
+ w.string("_")
+
+ case *adt.BasicType:
+ fmt.Fprint(w, x.K)
+
+ case *adt.BoundExpr:
+ fmt.Fprint(w, x.Op)
+ w.node(x.Expr)
+
+ case *adt.BoundValue:
+ fmt.Fprint(w, x.Op)
+ w.node(x.Value)
+
+ case *adt.NodeLink:
+ w.string(openTuple)
+ for i, f := range x.Node.Path() {
+ if i > 0 {
+ w.string(".")
+ }
+ w.label(f)
+ }
+ w.string(closeTuple)
+
+ case *adt.FieldReference:
+ w.label(x.Label)
+
+ case *adt.ValueReference:
+ w.label(x.Label)
+
+ case *adt.LabelReference:
+ if x.Src == nil {
+ w.string("LABEL")
+ } else {
+ w.string(x.Src.Name)
+ }
+
+ case *adt.DynamicReference:
+ w.node(x.Label)
+
+ case *adt.ImportReference:
+ w.label(x.ImportPath)
+
+ case *adt.LetReference:
+ w.ident(x.Label)
+
+ case *adt.SelectorExpr:
+ w.node(x.X)
+ w.string(".")
+ w.label(x.Sel)
+
+ case *adt.IndexExpr:
+ w.node(x.X)
+ w.string("[")
+ w.node(x.Index)
+ w.string("]")
+
+ case *adt.SliceExpr:
+ w.node(x.X)
+ w.string("[")
+ if x.Lo != nil {
+ w.node(x.Lo)
+ }
+ w.string(":")
+ if x.Hi != nil {
+ w.node(x.Hi)
+ }
+ if x.Stride != nil {
+ w.string(":")
+ w.node(x.Stride)
+ }
+ w.string("]")
+
+ case *adt.Interpolation:
+ w.interpolation(x)
+
+ case *adt.UnaryExpr:
+ fmt.Fprint(w, x.Op)
+ w.node(x.X)
+
+ case *adt.BinaryExpr:
+ w.string("(")
+ w.node(x.X)
+ fmt.Fprint(w, " ", x.Op, " ")
+ w.node(x.Y)
+ w.string(")")
+
+ case *adt.CallExpr:
+ w.node(x.Fun)
+ w.string("(")
+ for i, a := range x.Args {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(a)
+ }
+ w.string(")")
+
+ case *adt.Builtin:
+ if x.Package != 0 {
+ w.label(x.Package)
+ w.string(".")
+ }
+ w.string(x.Name)
+
+ case *adt.BuiltinValidator:
+ w.node(x.Builtin)
+ w.string("(")
+ for i, a := range x.Args {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(a)
+ }
+ w.string(")")
+
+ case *adt.DisjunctionExpr:
+ w.string("(")
+ for i, a := range x.Values {
+ if i > 0 {
+ w.string("|")
+ }
+ // Disjunct
+ if a.Default {
+ w.string("*")
+ }
+ w.node(a.Val)
+ }
+ w.string(")")
+
+ case *adt.Conjunction:
+ for i, c := range x.Values {
+ if i > 0 {
+ w.string(" & ")
+ }
+ w.node(c)
+ }
+
+ case *adt.Disjunction:
+ for i, c := range x.Values {
+ if i > 0 {
+ w.string(" | ")
+ }
+ if i < x.NumDefaults {
+ w.string("*")
+ }
+ w.node(c)
+ }
+
+ case *adt.Comprehension:
+ w.node(x.Clauses)
+ w.node(x.Value)
+
+ case *adt.ForClause:
+ w.string("for ")
+ w.ident(x.Key)
+ w.string(", ")
+ w.ident(x.Value)
+ w.string(" in ")
+ w.node(x.Src)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.IfClause:
+ w.string("if ")
+ w.node(x.Condition)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.LetClause:
+ w.string("let ")
+ w.ident(x.Label)
+ w.string(" = ")
+ w.node(x.Expr)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.ValueClause:
+
+ default:
+ panic(fmt.Sprintf("unknown type %T", x))
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/debug/debug.go b/vendor/cuelang.org/go/internal/core/debug/debug.go
new file mode 100644
index 0000000000..8394f83eee
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/debug/debug.go
@@ -0,0 +1,533 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug prints a given ADT node.
+//
+// Note that the result is not valid CUE, but instead prints the internals
+// of an ADT node in human-readable form. It uses a simple indentation algorithm
+// for improved readability and diffing.
+//
+package debug
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+)
+
+const (
+ openTuple = "\u3008"
+ closeTuple = "\u3009"
+)
+
+type Config struct {
+ Cwd string
+ Compact bool
+ Raw bool
+}
+
+func WriteNode(w io.Writer, i adt.StringIndexer, n adt.Node, config *Config) {
+ if config == nil {
+ config = &Config{}
+ }
+ p := printer{Writer: w, index: i, cfg: config}
+ if config.Compact {
+ p := compactPrinter{p}
+ p.node(n)
+ } else {
+ p.node(n)
+ }
+}
+
+func NodeString(i adt.StringIndexer, n adt.Node, config *Config) string {
+ b := &strings.Builder{}
+ WriteNode(b, i, n, config)
+ return b.String()
+}
+
+type printer struct {
+ io.Writer
+ index adt.StringIndexer
+ indent string
+ cfg *Config
+
+ // modes:
+ // - show vertex
+ // - show original conjuncts
+ // - show unevaluated
+ // - auto
+}
+
+func (w *printer) string(s string) {
+ s = strings.Replace(s, "\n", "\n"+w.indent, -1)
+ _, _ = io.WriteString(w, s)
+}
+
+func (w *printer) label(f adt.Feature) {
+ w.string(w.labelString(f))
+}
+
+func (w *printer) ident(f adt.Feature) {
+ w.string(f.IdentString(w.index))
+}
+
+// TODO: fold into label once :: is no longer supported.
+func (w *printer) labelString(f adt.Feature) string {
+ if f.IsHidden() {
+ ident := f.IdentString(w.index)
+ if pkgName := f.PkgID(w.index); pkgName != "_" {
+ ident = fmt.Sprintf("%s(%s)", ident, pkgName)
+ }
+ return ident
+ }
+ return f.SelectorString(w.index)
+}
+
+func (w *printer) shortError(errs errors.Error) {
+ for {
+ msg, args := errs.Msg()
+ fmt.Fprintf(w, msg, args...)
+
+ err := errors.Unwrap(errs)
+ if err == nil {
+ break
+ }
+
+ if errs, _ = err.(errors.Error); errs != nil {
+ w.string(err.Error())
+ break
+ }
+ }
+}
+
+func (w *printer) interpolation(x *adt.Interpolation) {
+ quote := `"`
+ if x.K == adt.BytesKind {
+ quote = `'`
+ }
+ w.string(quote)
+ for i := 0; i < len(x.Parts); i += 2 {
+ switch x.K {
+ case adt.StringKind:
+ if s, ok := x.Parts[i].(*adt.String); ok {
+ w.string(s.Str)
+ } else {
+ w.string("")
+ }
+ case adt.BytesKind:
+ if s, ok := x.Parts[i].(*adt.Bytes); ok {
+ _, _ = w.Write(s.B)
+ } else {
+ w.string("")
+ }
+ }
+ if i+1 < len(x.Parts) {
+ w.string(`\(`)
+ w.node(x.Parts[i+1])
+ w.string(`)`)
+ }
+ }
+ w.string(quote)
+}
+
+func (w *printer) node(n adt.Node) {
+ switch x := n.(type) {
+ case *adt.Vertex:
+ var kind adt.Kind
+ if x.BaseValue != nil {
+ kind = x.BaseValue.Kind()
+ }
+
+ kindStr := kind.String()
+
+ // TODO: replace with showing full closedness data.
+ if x.IsClosedList() || x.IsClosedStruct() {
+ if kind == adt.ListKind || kind == adt.StructKind {
+ kindStr = "#" + kindStr
+ }
+ }
+
+ fmt.Fprintf(w, "(%s){", kindStr)
+
+ saved := w.indent
+ w.indent += " "
+ defer func() { w.indent = saved }()
+
+ switch v := x.BaseValue.(type) {
+ case nil:
+ case *adt.Bottom:
+ // TODO: reuse bottom.
+ saved := w.indent
+ w.indent += "// "
+ w.string("\n")
+ fmt.Fprintf(w, "[%v]", v.Code)
+ if !v.ChildError {
+ msg := errors.Details(v.Err, &errors.Config{
+ Cwd: w.cfg.Cwd,
+ ToSlash: true,
+ })
+ msg = strings.TrimSpace(msg)
+ if msg != "" {
+ w.string(" ")
+ w.string(msg)
+ }
+ }
+ w.indent = saved
+
+ case *adt.StructMarker, *adt.ListMarker:
+ // if len(x.Arcs) == 0 {
+ // // w.string("}")
+ // // return
+ // }
+
+ case adt.Value:
+ if len(x.Arcs) == 0 {
+ w.string(" ")
+ w.node(v)
+ w.string(" }")
+ return
+ }
+ w.string("\n")
+ w.node(v)
+ }
+
+ for _, a := range x.Arcs {
+ w.string("\n")
+ w.label(a.Label)
+ w.string(": ")
+ w.node(a)
+ }
+
+ if x.BaseValue == nil {
+ w.indent += "// "
+ w.string("// ")
+ for i, c := range x.Conjuncts {
+ if i > 0 {
+ w.string(" & ")
+ }
+ w.node(c.Elem()) // TODO: also include env?
+ }
+ }
+
+ w.indent = saved
+ w.string("\n")
+ w.string("}")
+
+ case *adt.StructMarker:
+ w.string("struct")
+
+ case *adt.ListMarker:
+ w.string("list")
+
+ case *adt.StructLit:
+ if len(x.Decls) == 0 {
+ w.string("{}")
+ break
+ }
+ w.string("{")
+ w.indent += " "
+ for _, d := range x.Decls {
+ w.string("\n")
+ w.node(d)
+ }
+ w.indent = w.indent[:len(w.indent)-2]
+ w.string("\n}")
+
+ case *adt.ListLit:
+ if len(x.Elems) == 0 {
+ w.string("[]")
+ break
+ }
+ w.string("[")
+ w.indent += " "
+ for _, d := range x.Elems {
+ w.string("\n")
+ w.node(d)
+ w.string(",")
+ }
+ w.indent = w.indent[:len(w.indent)-2]
+ w.string("\n]")
+
+ case *adt.Field:
+ s := w.labelString(x.Label)
+ w.string(s)
+ w.string(":")
+ if x.Label.IsDef() && !internal.IsDef(s) {
+ w.string(":")
+ }
+ w.string(" ")
+ w.node(x.Value)
+
+ case *adt.OptionalField:
+ s := w.labelString(x.Label)
+ w.string(s)
+ w.string("?:")
+ if x.Label.IsDef() && !internal.IsDef(s) {
+ w.string(":")
+ }
+ w.string(" ")
+ w.node(x.Value)
+
+ case *adt.BulkOptionalField:
+ w.string("[")
+ w.node(x.Filter)
+ w.string("]: ")
+ w.node(x.Value)
+
+ case *adt.DynamicField:
+ w.node(x.Key)
+ if x.IsOptional() {
+ w.string("?")
+ }
+ w.string(": ")
+ w.node(x.Value)
+
+ case *adt.Ellipsis:
+ w.string("...")
+ if x.Value != nil {
+ w.node(x.Value)
+ }
+
+ case *adt.Bottom:
+ w.string(`_|_`)
+ if x.Err != nil {
+ w.string("(")
+ w.shortError(x.Err)
+ w.string(")")
+ }
+
+ case *adt.Null:
+ w.string("null")
+
+ case *adt.Bool:
+ fmt.Fprint(w, x.B)
+
+ case *adt.Num:
+ fmt.Fprint(w, &x.X)
+
+ case *adt.String:
+ w.string(literal.String.Quote(x.Str))
+
+ case *adt.Bytes:
+ w.string(literal.Bytes.Quote(string(x.B)))
+
+ case *adt.Top:
+ w.string("_")
+
+ case *adt.BasicType:
+ fmt.Fprint(w, x.K)
+
+ case *adt.BoundExpr:
+ fmt.Fprint(w, x.Op)
+ w.node(x.Expr)
+
+ case *adt.BoundValue:
+ fmt.Fprint(w, x.Op)
+ w.node(x.Value)
+
+ case *adt.NodeLink:
+ w.string(openTuple)
+ for i, f := range x.Node.Path() {
+ if i > 0 {
+ w.string(".")
+ }
+ w.label(f)
+ }
+ w.string(closeTuple)
+
+ case *adt.FieldReference:
+ w.string(openTuple)
+ w.string(strconv.Itoa(int(x.UpCount)))
+ w.string(";")
+ w.label(x.Label)
+ w.string(closeTuple)
+
+ case *adt.ValueReference:
+ w.string(openTuple)
+ w.string(strconv.Itoa(int(x.UpCount)))
+ w.string(closeTuple)
+
+ case *adt.LabelReference:
+ w.string(openTuple)
+ w.string(strconv.Itoa(int(x.UpCount)))
+ w.string(";-")
+ w.string(closeTuple)
+
+ case *adt.DynamicReference:
+ w.string(openTuple)
+ w.string(strconv.Itoa(int(x.UpCount)))
+ w.string(";(")
+ w.node(x.Label)
+ w.string(")")
+ w.string(closeTuple)
+
+ case *adt.ImportReference:
+ w.string(openTuple + "import;")
+ w.label(x.ImportPath)
+ w.string(closeTuple)
+
+ case *adt.LetReference:
+ w.string(openTuple)
+ w.string(strconv.Itoa(int(x.UpCount)))
+ w.string(";let ")
+ w.ident(x.Label)
+ w.string(closeTuple)
+
+ case *adt.SelectorExpr:
+ w.node(x.X)
+ w.string(".")
+ w.label(x.Sel)
+
+ case *adt.IndexExpr:
+ w.node(x.X)
+ w.string("[")
+ w.node(x.Index)
+ w.string("]")
+
+ case *adt.SliceExpr:
+ w.node(x.X)
+ w.string("[")
+ if x.Lo != nil {
+ w.node(x.Lo)
+ }
+ w.string(":")
+ if x.Hi != nil {
+ w.node(x.Hi)
+ }
+ if x.Stride != nil {
+ w.string(":")
+ w.node(x.Stride)
+ }
+ w.string("]")
+
+ case *adt.Interpolation:
+ w.interpolation(x)
+
+ case *adt.UnaryExpr:
+ fmt.Fprint(w, x.Op)
+ w.node(x.X)
+
+ case *adt.BinaryExpr:
+ w.string("(")
+ w.node(x.X)
+ fmt.Fprint(w, " ", x.Op, " ")
+ w.node(x.Y)
+ w.string(")")
+
+ case *adt.CallExpr:
+ w.node(x.Fun)
+ w.string("(")
+ for i, a := range x.Args {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(a)
+ }
+ w.string(")")
+
+ case *adt.Builtin:
+ if x.Package != 0 {
+ w.label(x.Package)
+ w.string(".")
+ }
+ w.string(x.Name)
+
+ case *adt.BuiltinValidator:
+ w.node(x.Builtin)
+ w.string("(")
+ for i, a := range x.Args {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(a)
+ }
+ w.string(")")
+
+ case *adt.DisjunctionExpr:
+ w.string("(")
+ for i, a := range x.Values {
+ if i > 0 {
+ w.string("|")
+ }
+ // Disjunct
+ if a.Default {
+ w.string("*")
+ }
+ w.node(a.Val)
+ }
+ w.string(")")
+
+ case *adt.Conjunction:
+ w.string("&(")
+ for i, c := range x.Values {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(c)
+ }
+ w.string(")")
+
+ case *adt.Disjunction:
+ w.string("|(")
+ for i, c := range x.Values {
+ if i > 0 {
+ w.string(", ")
+ }
+ if i < x.NumDefaults {
+ w.string("*")
+ }
+ w.node(c)
+ }
+ w.string(")")
+
+ case *adt.Comprehension:
+ w.node(x.Clauses)
+ w.node(x.Value)
+
+ case *adt.ForClause:
+ w.string("for ")
+ w.ident(x.Key)
+ w.string(", ")
+ w.ident(x.Value)
+ w.string(" in ")
+ w.node(x.Src)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.IfClause:
+ w.string("if ")
+ w.node(x.Condition)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.LetClause:
+ w.string("let ")
+ w.ident(x.Label)
+ w.string(" = ")
+ w.node(x.Expr)
+ w.string(" ")
+ w.node(x.Dst)
+
+ case *adt.ValueClause:
+
+ default:
+ panic(fmt.Sprintf("unknown type %T", x))
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/eval/eval.go b/vendor/cuelang.org/go/internal/core/eval/eval.go
new file mode 100644
index 0000000000..07bc3ac4ff
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/eval/eval.go
@@ -0,0 +1,66 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/debug"
+)
+
+func Evaluate(r adt.Runtime, v *adt.Vertex) {
+ format := func(n adt.Node) string {
+ return debug.NodeString(r, n, printConfig)
+ }
+ c := adt.New(v, &adt.Config{
+ Runtime: r,
+ Format: format,
+ })
+ c.Unify(v, adt.Finalized)
+}
+
+func New(r adt.Runtime) *Unifier {
+ return &Unifier{r: r, e: NewContext(r, nil)}
+}
+
+type Unifier struct {
+ r adt.Runtime
+ e *adt.OpContext
+}
+
+func (e *Unifier) Unify(ctx *adt.OpContext, v *adt.Vertex, state adt.VertexStatus) {
+ e.e.Unify(v, state)
+}
+
+func (e *Unifier) Stats() *adt.Stats {
+ return e.e.Stats()
+}
+
+// TODO: Note: NewContext takes essentially a cue.Value. By making this
+// type more central, we can perhaps avoid context creation.
+func NewContext(r adt.Runtime, v *adt.Vertex) *adt.OpContext {
+ format := func(n adt.Node) string {
+ return debug.NodeString(r, n, printConfig)
+ }
+ return adt.New(v, &adt.Config{
+ Runtime: r,
+ Format: format,
+ })
+}
+
+func (e *Unifier) NewContext(v *adt.Vertex) *adt.OpContext {
+ return NewContext(e.r, v)
+}
+
+var printConfig = &debug.Config{Compact: true}
diff --git a/vendor/cuelang.org/go/internal/core/export/adt.go b/vendor/cuelang.org/go/internal/core/export/adt.go
new file mode 100644
index 0000000000..8e860d609e
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/adt.go
@@ -0,0 +1,493 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (e *exporter) ident(x adt.Feature) *ast.Ident {
+ s := x.IdentString(e.ctx)
+ if !ast.IsValidIdent(s) {
+ panic(s + " is not a valid identifier")
+ }
+ return ast.NewIdent(s)
+}
+
+func (e *exporter) adt(expr adt.Elem, conjuncts []adt.Conjunct) ast.Expr {
+ switch x := expr.(type) {
+ case adt.Value:
+ return e.expr(x)
+
+ case *adt.ListLit:
+ a := []ast.Expr{}
+ for _, x := range x.Elems {
+ a = append(a, e.elem(x))
+ }
+ return ast.NewList(a...)
+
+ case *adt.StructLit:
+ // TODO: should we use pushFrame here?
+ // _, saved := e.pushFrame([]adt.Conjunct{adt.MakeConjunct(nil, x)})
+ // defer e.popFrame(saved)
+ // s := e.frame(0).scope
+
+ s := &ast.StructLit{}
+
+ for _, d := range x.Decls {
+ var a *ast.Alias
+ if orig, ok := d.Source().(*ast.Field); ok {
+ if alias, ok := orig.Value.(*ast.Alias); ok {
+ if e.valueAlias == nil {
+ e.valueAlias = map[*ast.Alias]*ast.Alias{}
+ }
+ a = &ast.Alias{Ident: ast.NewIdent(alias.Ident.Name)}
+ e.valueAlias[alias] = a
+ }
+ }
+ decl := e.decl(d)
+
+ if a != nil {
+ if f, ok := decl.(*ast.Field); ok {
+ a.Expr = f.Value
+ f.Value = a
+ }
+ }
+
+ s.Elts = append(s.Elts, decl)
+ }
+
+ return s
+
+ case *adt.FieldReference:
+ f := e.frame(x.UpCount)
+ entry := f.fields[x.Label]
+
+ name := x.Label.IdentString(e.ctx)
+ switch {
+ case entry.alias != "":
+ name = entry.alias
+
+ case !ast.IsValidIdent(name):
+ name = "X"
+ if x.Src != nil {
+ name = x.Src.Name
+ }
+ name = e.uniqueAlias(name)
+ entry.alias = name
+ }
+
+ ident := ast.NewIdent(name)
+ entry.references = append(entry.references, ident)
+
+ if f.fields != nil {
+ f.fields[x.Label] = entry
+ }
+
+ return ident
+
+ case *adt.ValueReference:
+ name := x.Label.IdentString(e.ctx)
+ if a, ok := x.Src.Node.(*ast.Alias); ok { // Should always pass
+ if b, ok := e.valueAlias[a]; ok {
+ name = b.Ident.Name
+ }
+ }
+ ident := ast.NewIdent(name)
+ return ident
+
+ case *adt.LabelReference:
+ // get potential label from Source. Otherwise use X.
+ f := e.frame(x.UpCount)
+ if f.field == nil {
+ // This can happen when the LabelReference is evaluated outside of
+ // normal evaluation, that is, if a pattern constraint or
+ // additional constraint is evaluated by itself.
+ return ast.NewIdent("string")
+ }
+ list, ok := f.field.Label.(*ast.ListLit)
+ if !ok || len(list.Elts) != 1 {
+ panic("label reference to non-pattern constraint field or invalid list")
+ }
+ name := ""
+ if a, ok := list.Elts[0].(*ast.Alias); ok {
+ name = a.Ident.Name
+ } else {
+ if x.Src != nil {
+ name = x.Src.Name
+ }
+ name = e.uniqueAlias(name)
+ list.Elts[0] = &ast.Alias{
+ Ident: ast.NewIdent(name),
+ Expr: list.Elts[0],
+ }
+ }
+ ident := ast.NewIdent(name)
+ ident.Scope = f.field
+ ident.Node = f.labelExpr
+ return ident
+
+ case *adt.DynamicReference:
+ // get potential label from Source. Otherwise use X.
+ name := "X"
+ f := e.frame(x.UpCount)
+ if d := f.field; d != nil {
+ if x.Src != nil {
+ name = x.Src.Name
+ }
+ name = e.getFieldAlias(d, name)
+ }
+ ident := ast.NewIdent(name)
+ ident.Scope = f.field
+ ident.Node = f.field
+ return ident
+
+ case *adt.ImportReference:
+ importPath := x.ImportPath.StringValue(e.index)
+ spec := ast.NewImport(nil, importPath)
+
+ info, _ := astutil.ParseImportSpec(spec)
+ name := info.PkgName
+ if x.Label != 0 {
+ name = x.Label.StringValue(e.index)
+ if name != info.PkgName {
+ spec.Name = ast.NewIdent(name)
+ }
+ }
+ ident := ast.NewIdent(name)
+ ident.Node = spec
+ return ident
+
+ case *adt.LetReference:
+ return e.resolveLet(x)
+
+ case *adt.SelectorExpr:
+ return &ast.SelectorExpr{
+ X: e.expr(x.X),
+ Sel: e.stringLabel(x.Sel),
+ }
+
+ case *adt.IndexExpr:
+ return &ast.IndexExpr{
+ X: e.expr(x.X),
+ Index: e.expr(x.Index),
+ }
+
+ case *adt.SliceExpr:
+ var lo, hi ast.Expr
+ if x.Lo != nil {
+ lo = e.expr(x.Lo)
+ }
+ if x.Hi != nil {
+ hi = e.expr(x.Hi)
+ }
+ // TODO: Stride not yet? implemented.
+ // if x.Stride != nil {
+ // stride = e.expr(x.Stride)
+ // }
+ return &ast.SliceExpr{X: e.expr(x.X), Low: lo, High: hi}
+
+ case *adt.Interpolation:
+ var (
+ tripple = `"""`
+ openQuote = `"`
+ closeQuote = `"`
+ f = literal.String
+ )
+ if x.K&adt.BytesKind != 0 {
+ tripple = `'''`
+ openQuote = `'`
+ closeQuote = `'`
+ f = literal.Bytes
+ }
+ toString := func(v adt.Expr) string {
+ str := ""
+ switch x := v.(type) {
+ case *adt.String:
+ str = x.Str
+ case *adt.Bytes:
+ str = string(x.B)
+ }
+ return str
+ }
+ t := &ast.Interpolation{}
+ f = f.WithGraphicOnly()
+ indent := ""
+ // TODO: mark formatting in interpolation itself.
+ for i := 0; i < len(x.Parts); i += 2 {
+ if strings.IndexByte(toString(x.Parts[i]), '\n') >= 0 {
+ f = f.WithTabIndent(len(e.stack))
+ indent = strings.Repeat("\t", len(e.stack))
+ openQuote = tripple + "\n" + indent
+ closeQuote = tripple
+ break
+ }
+ }
+ prefix := openQuote
+ suffix := `\(`
+ for i, elem := range x.Parts {
+ if i%2 == 1 {
+ t.Elts = append(t.Elts, e.expr(elem))
+ } else {
+ // b := strings.Builder{}
+ buf := []byte(prefix)
+ str := toString(elem)
+ buf = f.AppendEscaped(buf, str)
+ if i == len(x.Parts)-1 {
+ if len(closeQuote) > 1 {
+ buf = append(buf, '\n')
+ buf = append(buf, indent...)
+ }
+ buf = append(buf, closeQuote...)
+ } else {
+ if bytes.HasSuffix(buf, []byte("\n")) {
+ buf = append(buf, indent...)
+ }
+ buf = append(buf, suffix...)
+ }
+ t.Elts = append(t.Elts, &ast.BasicLit{
+ Kind: token.STRING,
+ Value: string(buf),
+ })
+ }
+ prefix = ")"
+ }
+ return t
+
+ case *adt.BoundExpr:
+ return &ast.UnaryExpr{
+ Op: x.Op.Token(),
+ X: e.expr(x.Expr),
+ }
+
+ case *adt.UnaryExpr:
+ return &ast.UnaryExpr{
+ Op: x.Op.Token(),
+ X: e.expr(x.X),
+ }
+
+ case *adt.BinaryExpr:
+ return &ast.BinaryExpr{
+ Op: x.Op.Token(),
+ X: e.expr(x.X),
+ Y: e.expr(x.Y),
+ }
+
+ case *adt.CallExpr:
+ a := []ast.Expr{}
+ for _, arg := range x.Args {
+ v := e.expr(arg)
+ if v == nil {
+ e.expr(arg)
+ panic("")
+ }
+ a = append(a, v)
+ }
+ fun := e.expr(x.Fun)
+ return &ast.CallExpr{Fun: fun, Args: a}
+
+ case *adt.DisjunctionExpr:
+ a := []ast.Expr{}
+ for _, d := range x.Values {
+ v := e.expr(d.Val)
+ if d.Default {
+ v = &ast.UnaryExpr{Op: token.MUL, X: v}
+ }
+ a = append(a, v)
+ }
+ return ast.NewBinExpr(token.OR, a...)
+
+ default:
+ panic(fmt.Sprintf("unknown field %T", x))
+ }
+}
+
+func (e *exporter) decl(d adt.Decl) ast.Decl {
+ switch x := d.(type) {
+ case adt.Elem:
+ return e.elem(x)
+
+ case *adt.Field:
+ e.setDocs(x)
+ f := &ast.Field{
+ Label: e.stringLabel(x.Label),
+ }
+
+ frame := e.frame(0)
+ entry := frame.fields[x.Label]
+ entry.field = f
+ entry.node = f.Value
+ frame.fields[x.Label] = entry
+
+ f.Value = e.expr(x.Value)
+
+ // extractDocs(nil)
+ return f
+
+ case *adt.OptionalField:
+ e.setDocs(x)
+ f := &ast.Field{
+ Label: e.stringLabel(x.Label),
+ Optional: token.NoSpace.Pos(),
+ }
+
+ frame := e.frame(0)
+ entry := frame.fields[x.Label]
+ entry.field = f
+ entry.node = f.Value
+ frame.fields[x.Label] = entry
+
+ f.Value = e.expr(x.Value)
+
+ // extractDocs(nil)
+ return f
+
+ case *adt.BulkOptionalField:
+ e.setDocs(x)
+ // set bulk in frame.
+ frame := e.frame(0)
+
+ expr := e.expr(x.Filter)
+ frame.labelExpr = expr // see astutil.Resolve.
+
+ if x.Label != 0 {
+ expr = &ast.Alias{Ident: e.ident(x.Label), Expr: expr}
+ }
+ f := &ast.Field{
+ Label: ast.NewList(expr),
+ }
+
+ frame.field = f
+
+ f.Value = e.expr(x.Value)
+
+ return f
+
+ case *adt.DynamicField:
+ e.setDocs(x)
+ key := e.expr(x.Key)
+ if _, ok := key.(*ast.Interpolation); !ok {
+ key = &ast.ParenExpr{X: key}
+ }
+ f := &ast.Field{
+ Label: key.(ast.Label),
+ }
+
+ frame := e.frame(0)
+ frame.field = f
+ frame.labelExpr = key
+ // extractDocs(nil)
+
+ f.Value = e.expr(x.Value)
+
+ return f
+
+ default:
+ panic(fmt.Sprintf("unknown field %T", x))
+ }
+}
+
+func (e *exporter) elem(d adt.Elem) ast.Expr {
+
+ switch x := d.(type) {
+ case adt.Expr:
+ return e.expr(x)
+
+ case *adt.Ellipsis:
+ t := &ast.Ellipsis{}
+ if x.Value != nil {
+ t.Type = e.expr(x.Value)
+ }
+ return t
+
+ case *adt.Comprehension:
+ return e.comprehension(x)
+
+ default:
+ panic(fmt.Sprintf("unknown field %T", x))
+ }
+}
+
+func (e *exporter) comprehension(comp *adt.Comprehension) *ast.Comprehension {
+ c := &ast.Comprehension{}
+
+ y := comp.Clauses
+
+loop:
+ for {
+ switch x := y.(type) {
+ case *adt.ForClause:
+ value := e.ident(x.Value)
+ clause := &ast.ForClause{
+ Value: value,
+ Source: e.expr(x.Src),
+ }
+ c.Clauses = append(c.Clauses, clause)
+
+ _, saved := e.pushFrame(nil)
+ defer e.popFrame(saved)
+
+ if x.Key != adt.InvalidLabel ||
+ (x.Syntax != nil && x.Syntax.Key != nil) {
+ key := e.ident(x.Key)
+ clause.Key = key
+ e.addField(x.Key, nil, clause)
+ }
+ e.addField(x.Value, nil, clause)
+
+ y = x.Dst
+
+ case *adt.IfClause:
+ clause := &ast.IfClause{Condition: e.expr(x.Condition)}
+ c.Clauses = append(c.Clauses, clause)
+ y = x.Dst
+
+ case *adt.LetClause:
+ clause := &ast.LetClause{
+ Ident: e.ident(x.Label),
+ Expr: e.expr(x.Expr),
+ }
+ c.Clauses = append(c.Clauses, clause)
+
+ _, saved := e.pushFrame(nil)
+ defer e.popFrame(saved)
+
+ e.addField(x.Label, nil, clause)
+
+ y = x.Dst
+
+ case *adt.ValueClause:
+ break loop
+
+ default:
+ panic(fmt.Sprintf("unknown field %T", x))
+ }
+ }
+
+ v := e.expr(comp.Value)
+ if _, ok := v.(*ast.StructLit); !ok {
+ v = ast.NewStruct(ast.Embed(v))
+ }
+ c.Value = v
+ return c
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/bounds.go b/vendor/cuelang.org/go/internal/core/export/bounds.go
new file mode 100644
index 0000000000..71788a9d99
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/bounds.go
@@ -0,0 +1,213 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/internal/core/adt"
+ "github.com/cockroachdb/apd/v2"
+)
+
+// boundSimplifier simplifies bound values into predeclared identifiers, if
+// possible.
+type boundSimplifier struct {
+ e *exporter
+
+ isInt bool
+ min *adt.BoundValue
+ minNum *adt.Num
+ max *adt.BoundValue
+ maxNum *adt.Num
+}
+
+func (s *boundSimplifier) add(v adt.Value) (used bool) {
+ switch x := v.(type) {
+ case *adt.BasicType:
+ switch x.K & adt.ScalarKinds {
+ case adt.IntKind:
+ s.isInt = true
+ return true
+ }
+
+ case *adt.BoundValue:
+ if adt.IsConcrete(x.Value) && x.Kind() == adt.IntKind {
+ s.isInt = true
+ }
+ switch x.Op {
+ case adt.GreaterThanOp:
+ if n, ok := x.Value.(*adt.Num); ok {
+ if s.min == nil || s.minNum.X.Cmp(&n.X) != 1 {
+ s.min = x
+ s.minNum = n
+ }
+ return true
+ }
+
+ case adt.GreaterEqualOp:
+ if n, ok := x.Value.(*adt.Num); ok {
+ if s.min == nil || s.minNum.X.Cmp(&n.X) == -1 {
+ s.min = x
+ s.minNum = n
+ }
+ return true
+ }
+
+ case adt.LessThanOp:
+ if n, ok := x.Value.(*adt.Num); ok {
+ if s.max == nil || s.maxNum.X.Cmp(&n.X) != -1 {
+ s.max = x
+ s.maxNum = n
+ }
+ return true
+ }
+
+ case adt.LessEqualOp:
+ if n, ok := x.Value.(*adt.Num); ok {
+ if s.max == nil || s.maxNum.X.Cmp(&n.X) == 1 {
+ s.max = x
+ s.maxNum = n
+ }
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+type builtinRange struct {
+ typ string
+ lo *apd.Decimal
+ hi *apd.Decimal
+}
+
+func makeDec(s string) *apd.Decimal {
+ d, _, err := apd.NewFromString(s)
+ if err != nil {
+ panic(err)
+ }
+ return d
+}
+
+func (s *boundSimplifier) expr(ctx *adt.OpContext) (e ast.Expr) {
+ if s.min == nil || s.max == nil {
+ return nil
+ }
+ switch {
+ case s.isInt:
+ t := s.matchRange(intRanges)
+ if t != "" {
+ e = ast.NewIdent(t)
+ break
+ }
+ if sign := s.minNum.X.Sign(); sign == -1 {
+ e = ast.NewIdent("int")
+
+ } else {
+ e = ast.NewIdent("uint")
+ if sign == 0 && s.min.Op == adt.GreaterEqualOp {
+ s.min = nil
+ break
+ }
+ }
+ fallthrough
+ default:
+ t := s.matchRange(floatRanges)
+ if t != "" {
+ e = wrapBin(e, ast.NewIdent(t), adt.AndOp)
+ }
+ }
+
+ if s.min != nil {
+ e = wrapBin(e, s.e.expr(s.min), adt.AndOp)
+ }
+ if s.max != nil {
+ e = wrapBin(e, s.e.expr(s.max), adt.AndOp)
+ }
+ return e
+}
+
+func (s *boundSimplifier) matchRange(ranges []builtinRange) (t string) {
+ for _, r := range ranges {
+ if !s.minNum.X.IsZero() && s.min.Op == adt.GreaterEqualOp && s.minNum.X.Cmp(r.lo) == 0 {
+ switch s.maxNum.X.Cmp(r.hi) {
+ case 0:
+ if s.max.Op == adt.LessEqualOp {
+ s.max = nil
+ }
+ s.min = nil
+ return r.typ
+ case -1:
+ if !s.minNum.X.IsZero() {
+ s.min = nil
+ return r.typ
+ }
+ case 1:
+ }
+ } else if s.max.Op == adt.LessEqualOp && s.maxNum.X.Cmp(r.hi) == 0 {
+ switch s.minNum.X.Cmp(r.lo) {
+ case -1:
+ case 0:
+ if s.min.Op == adt.GreaterEqualOp {
+ s.min = nil
+ }
+ fallthrough
+ case 1:
+ s.max = nil
+ return r.typ
+ }
+ }
+ }
+ return ""
+}
+
+var intRanges = []builtinRange{
+ {"int8", makeDec("-128"), makeDec("127")},
+ {"int16", makeDec("-32768"), makeDec("32767")},
+ {"int32", makeDec("-2147483648"), makeDec("2147483647")},
+ {"int64", makeDec("-9223372036854775808"), makeDec("9223372036854775807")},
+ {"int128", makeDec("-170141183460469231731687303715884105728"),
+ makeDec("170141183460469231731687303715884105727")},
+
+ {"uint8", makeDec("0"), makeDec("255")},
+ {"uint16", makeDec("0"), makeDec("65535")},
+ {"uint32", makeDec("0"), makeDec("4294967295")},
+ {"uint64", makeDec("0"), makeDec("18446744073709551615")},
+ {"uint128", makeDec("0"), makeDec("340282366920938463463374607431768211455")},
+
+ // {"rune", makeDec("0"), makeDec(strconv.Itoa(0x10FFFF))},
+}
+
+var floatRanges = []builtinRange{
+ // 2**127 * (2**24 - 1) / 2**23
+ {"float32",
+ makeDec("-3.40282346638528859811704183484516925440e+38"),
+ makeDec("3.40282346638528859811704183484516925440e+38")},
+
+ // 2**1023 * (2**53 - 1) / 2**52
+ {"float64",
+ makeDec("-1.797693134862315708145274237317043567981e+308"),
+ makeDec("1.797693134862315708145274237317043567981e+308")},
+}
+
+func wrapBin(a, b ast.Expr, op adt.Op) ast.Expr {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ return ast.NewBinExpr(op.Token(), a, b)
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/export.go b/vendor/cuelang.org/go/internal/core/export/export.go
new file mode 100644
index 0000000000..cfac720634
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/export.go
@@ -0,0 +1,565 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "fmt"
+ "math/rand"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/eval"
+ "cuelang.org/go/internal/core/walk"
+)
+
+const debug = false
+
+type Profile struct {
+ Simplify bool
+
+ // Final reports incomplete errors as errors.
+ Final bool
+
+ // TakeDefaults is used in Value mode to drop non-default values.
+ TakeDefaults bool
+
+ ShowOptional bool
+ ShowDefinitions bool
+
+ // ShowHidden forces the inclusion of hidden fields when these would
+ // otherwise be omitted. Only hidden fields from the current package are
+ // included.
+ ShowHidden bool
+ ShowDocs bool
+ ShowAttributes bool
+
+ // ShowErrors treats errors as values and will not percolate errors up.
+ //
+ // TODO: convert this option to an error level instead, showing only
+ // errors below a certain severity.
+ ShowErrors bool
+
+ // Use unevaluated conjuncts for these error types
+ // IgnoreRecursive
+
+ // TODO: recurse over entire tree to determine transitive closure
+ // of what needs to be printed.
+ // IncludeDependencies bool
+}
+
+var Simplified = &Profile{
+ Simplify: true,
+ ShowDocs: true,
+}
+
+var Final = &Profile{
+ Simplify: true,
+ TakeDefaults: true,
+ Final: true,
+}
+
+var Raw = &Profile{
+ ShowOptional: true,
+ ShowDefinitions: true,
+ ShowHidden: true,
+ ShowDocs: true,
+}
+
+var All = &Profile{
+ Simplify: true,
+ ShowOptional: true,
+ ShowDefinitions: true,
+ ShowHidden: true,
+ ShowDocs: true,
+ ShowAttributes: true,
+}
+
+// Concrete
+
+// Def exports v as a definition.
+func Def(r adt.Runtime, pkgID string, v *adt.Vertex) (*ast.File, errors.Error) {
+ return All.Def(r, pkgID, v)
+}
+
+// Def exports v as a definition.
+func (p *Profile) Def(r adt.Runtime, pkgID string, v *adt.Vertex) (*ast.File, errors.Error) {
+ e := newExporter(p, r, pkgID, v)
+ e.markUsedFeatures(v)
+
+ isDef := v.IsRecursivelyClosed()
+ if isDef {
+ e.inDefinition++
+ }
+
+ expr := e.expr(v)
+
+ if isDef {
+ e.inDefinition--
+ if v.Kind() == adt.StructKind {
+ expr = ast.NewStruct(
+ ast.Embed(ast.NewIdent("_#def")),
+ ast.NewIdent("_#def"), expr,
+ )
+ }
+ }
+ return e.toFile(v, expr)
+}
+
+func Expr(r adt.Runtime, pkgID string, n adt.Expr) (ast.Expr, errors.Error) {
+ return Simplified.Expr(r, pkgID, n)
+}
+
+func (p *Profile) Expr(r adt.Runtime, pkgID string, n adt.Expr) (ast.Expr, errors.Error) {
+ e := newExporter(p, r, pkgID, nil)
+ e.markUsedFeatures(n)
+
+ return e.expr(n), nil
+}
+
+func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) (*ast.File, errors.Error) {
+ f := &ast.File{}
+
+ pkgName := ""
+ pkg := &ast.Package{}
+ for _, c := range v.Conjuncts {
+ f, _ := c.Source().(*ast.File)
+ if f == nil {
+ continue
+ }
+
+ if _, name, _ := internal.PackageInfo(f); name != "" {
+ pkgName = name
+ }
+
+ if e.cfg.ShowDocs {
+ if doc := internal.FileComment(f); doc != nil {
+ ast.AddComment(pkg, doc)
+ }
+ }
+ }
+
+ if pkgName != "" {
+ pkg.Name = ast.NewIdent(pkgName)
+ f.Decls = append(f.Decls, pkg)
+ }
+
+ switch st := x.(type) {
+ case nil:
+ panic("null input")
+
+ case *ast.StructLit:
+ f.Decls = append(f.Decls, st.Elts...)
+
+ default:
+ f.Decls = append(f.Decls, &ast.EmbedDecl{Expr: x})
+ }
+ if err := astutil.Sanitize(f); err != nil {
+ err := errors.Promote(err, "export")
+ return f, errors.Append(e.errs, err)
+ }
+
+ return f, nil
+}
+
+// File
+
+func Vertex(r adt.Runtime, pkgID string, n *adt.Vertex) (*ast.File, errors.Error) {
+ return Simplified.Vertex(r, pkgID, n)
+}
+
+func (p *Profile) Vertex(r adt.Runtime, pkgID string, n *adt.Vertex) (*ast.File, errors.Error) {
+ e := exporter{
+ ctx: eval.NewContext(r, nil),
+ cfg: p,
+ index: r,
+ pkgID: pkgID,
+ }
+ e.markUsedFeatures(n)
+ v := e.value(n, n.Conjuncts...)
+
+ return e.toFile(n, v)
+}
+
+func Value(r adt.Runtime, pkgID string, n adt.Value) (ast.Expr, errors.Error) {
+ return Simplified.Value(r, pkgID, n)
+}
+
+// Should take context.
+func (p *Profile) Value(r adt.Runtime, pkgID string, n adt.Value) (ast.Expr, errors.Error) {
+ e := exporter{
+ ctx: eval.NewContext(r, nil),
+ cfg: p,
+ index: r,
+ pkgID: pkgID,
+ }
+ e.markUsedFeatures(n)
+ v := e.value(n)
+ return v, e.errs
+}
+
+type exporter struct {
+ cfg *Profile // Make value todo
+ errs errors.Error
+
+ ctx *adt.OpContext
+
+ index adt.StringIndexer
+ rand *rand.Rand
+
+ // For resolving references.
+ stack []frame
+
+ inDefinition int // for close() wrapping.
+
+ // hidden label handling
+ pkgID string
+ hidden map[string]adt.Feature // adt.InvalidFeatures means more than one.
+
+ // If a used feature maps to an expression, it means it is assigned to a
+ // unique let expression.
+ usedFeature map[adt.Feature]adt.Expr
+ labelAlias map[adt.Expr]adt.Feature
+ valueAlias map[*ast.Alias]*ast.Alias
+ letAlias map[*ast.LetClause]*ast.LetClause
+
+ usedHidden map[string]bool
+}
+
+func newExporter(p *Profile, r adt.Runtime, pkgID string, v *adt.Vertex) *exporter {
+ return &exporter{
+ cfg: p,
+ ctx: eval.NewContext(r, v),
+ index: r,
+ pkgID: pkgID,
+ }
+}
+
+func (e *exporter) markUsedFeatures(x adt.Expr) {
+ e.usedFeature = make(map[adt.Feature]adt.Expr)
+
+ w := &walk.Visitor{}
+ w.Before = func(n adt.Node) bool {
+ switch x := n.(type) {
+ case *adt.Vertex:
+ if !x.IsData() {
+ for _, c := range x.Conjuncts {
+ w.Elem(c.Elem())
+ }
+ }
+
+ case *adt.DynamicReference:
+ if e.labelAlias == nil {
+ e.labelAlias = make(map[adt.Expr]adt.Feature)
+ }
+ // TODO: add preferred label.
+ e.labelAlias[x.Label] = adt.InvalidLabel
+
+ case *adt.LabelReference:
+ }
+ return true
+ }
+
+ w.Feature = func(f adt.Feature, src adt.Node) {
+ _, ok := e.usedFeature[f]
+
+ switch x := src.(type) {
+ case *adt.LetReference:
+ if !ok {
+ e.usedFeature[f] = x.X
+ }
+
+ default:
+ e.usedFeature[f] = nil
+ }
+ }
+
+ w.Elem(x)
+}
+
+func (e *exporter) getFieldAlias(f *ast.Field, name string) string {
+ a, ok := f.Label.(*ast.Alias)
+ if !ok {
+ a = &ast.Alias{
+ Ident: ast.NewIdent(e.uniqueAlias(name)),
+ Expr: f.Label.(ast.Expr),
+ }
+ f.Label = a
+ }
+ return a.Ident.Name
+}
+
+func setFieldAlias(f *ast.Field, name string) {
+ if _, ok := f.Label.(*ast.Alias); !ok {
+ f.Label = &ast.Alias{
+ Ident: ast.NewIdent(name),
+ Expr: f.Label.(ast.Expr),
+ }
+ }
+}
+
+func (e *exporter) markLets(n ast.Node) {
+ if n == nil {
+ return
+ }
+ ast.Walk(n, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case *ast.StructLit:
+ e.markLetDecls(v.Elts)
+ case *ast.File:
+ e.markLetDecls(v.Decls)
+
+ case *ast.Field,
+ *ast.LetClause,
+ *ast.IfClause,
+ *ast.ForClause,
+ *ast.Comprehension:
+ return false
+ }
+ return true
+ }, nil)
+}
+
+func (e *exporter) markLetDecls(decls []ast.Decl) {
+ for _, d := range decls {
+ if let, ok := d.(*ast.LetClause); ok {
+ e.markLetAlias(let)
+ }
+ }
+}
+
+// markLetAlias inserts an uninitialized let clause into the current scope.
+// It gets initialized upon first usage.
+func (e *exporter) markLetAlias(x *ast.LetClause) {
+ // The created let clause is initialized upon first usage, and removed
+ // later if never referenced.
+ let := &ast.LetClause{}
+
+ if e.letAlias == nil {
+ e.letAlias = make(map[*ast.LetClause]*ast.LetClause)
+ }
+ e.letAlias[x] = let
+
+ scope := e.top().scope
+ scope.Elts = append(scope.Elts, let)
+}
+
+// In value mode, lets are only used if there wasn't an error.
+func filterUnusedLets(s *ast.StructLit) {
+ k := 0
+ for i, d := range s.Elts {
+ if let, ok := d.(*ast.LetClause); ok && let.Expr == nil {
+ continue
+ }
+ s.Elts[k] = s.Elts[i]
+ k++
+ }
+ s.Elts = s.Elts[:k]
+}
+
+// resolveLet actually parses the let expression.
+// If there was no recorded let expression, it expands the expression in place.
+func (e *exporter) resolveLet(x *adt.LetReference) ast.Expr {
+ letClause, _ := x.Src.Node.(*ast.LetClause)
+ let := e.letAlias[letClause]
+
+ switch {
+ case let == nil:
+ return e.expr(x.X)
+
+ case let.Expr == nil:
+ label := e.uniqueLetIdent(x.Label, x.X)
+
+ let.Ident = e.ident(label)
+ let.Expr = e.expr(x.X)
+ }
+
+ ident := ast.NewIdent(let.Ident.Name)
+ ident.Node = let
+ // TODO: set scope?
+ return ident
+}
+
+func (e *exporter) uniqueLetIdent(f adt.Feature, x adt.Expr) adt.Feature {
+ if e.usedFeature[f] == x {
+ return f
+ }
+
+ f, _ = e.uniqueFeature(f.IdentString(e.ctx))
+ e.usedFeature[f] = x
+ return f
+}
+
+func (e *exporter) uniqueAlias(name string) string {
+ f := adt.MakeIdentLabel(e.ctx, name, "")
+
+ if _, ok := e.usedFeature[f]; !ok {
+ e.usedFeature[f] = nil
+ return name
+ }
+
+ _, name = e.uniqueFeature(f.IdentString(e.ctx))
+ return name
+}
+
+// uniqueFeature returns a name for an identifier that uniquely identifies
+// the given expression. If the preferred name is already taken, a new globally
+// unique name of the form base_X ... base_XXXXXXXXXXXXXX is generated.
+//
+// It prefers short extensions over large ones, while ensuring the likelihood of
+// fast termination is high. There are at least two digits to make it visually
+// clearer this concerns a generated number.
+//
+func (e *exporter) uniqueFeature(base string) (f adt.Feature, name string) {
+ if e.rand == nil {
+ e.rand = rand.New(rand.NewSource(808))
+ }
+
+ // Try the first few numbers in sequence.
+ for i := 1; i < 5; i++ {
+ name := fmt.Sprintf("%s_%01X", base, i)
+ f := adt.MakeIdentLabel(e.ctx, name, "")
+ if _, ok := e.usedFeature[f]; !ok {
+ e.usedFeature[f] = nil
+ return f, name
+ }
+ }
+
+ const mask = 0xff_ffff_ffff_ffff // max bits; stay clear of int64 overflow
+ const shift = 4 // rate of growth
+ digits := 1
+ for n := int64(0x10); ; n = int64(mask&((n<= 0; i-- {
+ f := &(e.stack[i])
+ if upCount <= (f.upCount - 1) {
+ return f
+ }
+ upCount -= f.upCount
+ }
+ if debug {
+ // This may be valid when exporting incomplete references. These are
+ // not yet handled though, so find a way to catch them when debugging
+ // printing of values that are supposed to be complete.
+ panic("unreachable reference")
+ }
+
+ return &frame{}
+}
+
+func (e *exporter) setDocs(x adt.Node) {
+ f := e.stack[len(e.stack)-1]
+ f.docSources = []adt.Conjunct{adt.MakeRootConjunct(nil, x)}
+ e.stack[len(e.stack)-1] = f
+}
+
+// func (e *Exporter) promise(upCount int32, f completeFunc) {
+// e.todo = append(e.todo, f)
+// }
+
+func (e *exporter) errf(format string, args ...interface{}) *ast.BottomLit {
+ err := &exporterError{}
+ e.errs = errors.Append(e.errs, err)
+ return &ast.BottomLit{}
+}
+
+type errTODO errors.Error
+
+type exporterError struct {
+ errTODO
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/expr.go b/vendor/cuelang.org/go/internal/core/export/expr.go
new file mode 100644
index 0000000000..c8d7a1290d
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/expr.go
@@ -0,0 +1,510 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "fmt"
+ "sort"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// Modes:
+// raw: as is
+// def: merge structs, print reset as is.
+//
+// Possible simplifications in def mode:
+// - merge contents of multiple _literal_ structs.
+// - this is not possible if some of the elements are bulk optional
+// (or is it?).
+// - still do not ever resolve references.
+// - to do this, fields must be pre-linked to their destinations.
+// - use astutil.Sanitize to resolve shadowing and imports.
+//
+//
+// Categories of printing:
+// - concrete
+// - optionals
+// - references
+// - constraints
+//
+// Mixed mode is also not supported in the old implementation (at least not
+// correctly). It requires references to resolve properly, backtracking to
+// a common root and prefixing that to the reference. This is now possible
+// with the Environment construct and could be done later.
+
+func (e *exporter) expr(v adt.Elem) (result ast.Expr) {
+ switch x := v.(type) {
+ case nil:
+ return nil
+
+ case *adt.Vertex:
+ if len(x.Conjuncts) == 0 || x.IsData() {
+ // Treat as literal value.
+ return e.value(x)
+ } // Should this be the arcs label?
+
+ a := []conjunct{}
+ for _, c := range x.Conjuncts {
+ a = append(a, conjunct{c, 0})
+ }
+
+ return e.mergeValues(adt.InvalidLabel, x, a, x.Conjuncts...)
+
+ case *adt.StructLit:
+ c := adt.MakeRootConjunct(nil, x)
+ return e.mergeValues(adt.InvalidLabel, nil, []conjunct{{c: c, up: 0}}, c)
+
+ case adt.Value:
+ return e.value(x) // Use conjuncts.
+
+ default:
+ return e.adt(v, nil)
+ }
+}
+
+// Piece out values:
+
+// For a struct, piece out conjuncts that are already values. Those can be
+// unified. All other conjuncts are added verbatim.
+
+func (x *exporter) mergeValues(label adt.Feature, src *adt.Vertex, a []conjunct, orig ...adt.Conjunct) (expr ast.Expr) {
+
+ e := conjuncts{
+ exporter: x,
+ values: &adt.Vertex{},
+ fields: map[adt.Feature]field{},
+ attrs: []*ast.Attribute{},
+ }
+
+ s, saved := e.pushFrame(orig)
+ defer e.popFrame(saved)
+
+ // Handle value aliases and lets
+ var valueAlias *ast.Alias
+ for _, c := range a {
+ if f, ok := c.c.Field().Source().(*ast.Field); ok {
+ if a, ok := f.Value.(*ast.Alias); ok {
+ if valueAlias == nil {
+ if e.valueAlias == nil {
+ e.valueAlias = map[*ast.Alias]*ast.Alias{}
+ }
+ name := a.Ident.Name
+ name = e.uniqueAlias(name)
+ valueAlias = &ast.Alias{Ident: ast.NewIdent(name)}
+ }
+ e.valueAlias[a] = valueAlias
+ }
+ }
+ x.markLets(c.c.Expr().Source())
+ }
+
+ defer filterUnusedLets(s)
+
+ defer func() {
+ if valueAlias != nil {
+ valueAlias.Expr = expr
+ expr = valueAlias
+ }
+ }()
+
+ hasAlias := len(s.Elts) > 0
+
+ for _, c := range a {
+ e.top().upCount = c.up
+ x := c.c.Elem()
+ e.addExpr(c.c.Env, src, x, false)
+ }
+
+ if src != nil {
+ for _, a := range src.Arcs {
+ if x, ok := e.fields[a.Label]; ok {
+ x.arc = a
+ e.fields[a.Label] = x
+ }
+ }
+ }
+
+ for _, a := range e.attrs {
+ s.Elts = append(s.Elts, a)
+ }
+
+ // Unify values only for one level.
+ if a := e.values.Conjuncts; len(a) > 0 {
+ e.values.Finalize(e.ctx)
+ e.embed = append(e.embed, e.value(e.values, a...))
+ }
+
+ // Collect and order set of fields.
+
+ fields := []adt.Feature{}
+ for f := range e.fields {
+ fields = append(fields, f)
+ }
+
+ // Sort fields in case features lists are missing to ensure
+ // predictability. Also sort in reverse order, so that bugs
+ // are more likely exposed.
+ sort.Slice(fields, func(i, j int) bool {
+ return fields[i] > fields[j]
+ })
+
+ if adt.DebugSort == 0 {
+ m := sortArcs(extractFeatures(e.structs))
+ sort.SliceStable(fields, func(i, j int) bool {
+ if m[fields[j]] == 0 {
+ return m[fields[i]] != 0
+ }
+ return m[fields[i]] > m[fields[j]]
+ })
+ } else {
+ adt.DebugSortFields(e.ctx, fields)
+ }
+
+ if len(e.fields) == 0 && !e.hasEllipsis {
+ switch len(e.embed) + len(e.conjuncts) {
+ case 0:
+ if len(e.attrs) > 0 {
+ break
+ }
+ if len(e.structs) > 0 || e.isData {
+ return e.wrapCloseIfNecessary(s, src)
+ }
+ return ast.NewIdent("_")
+ case 1:
+ var x ast.Expr
+ if len(e.conjuncts) == 1 {
+ x = e.conjuncts[0]
+ } else {
+ x = e.embed[0]
+ }
+ if len(e.attrs) == 0 && !hasAlias {
+ return x
+ }
+ if st, ok := x.(*ast.StructLit); ok {
+ s.Elts = append(s.Elts, st.Elts...)
+ return e.wrapCloseIfNecessary(s, src)
+ }
+ }
+ }
+
+ for _, x := range e.embed {
+ s.Elts = append(s.Elts, &ast.EmbedDecl{Expr: x})
+ }
+
+ for _, f := range fields {
+ field := e.fields[f]
+ c := field.conjuncts
+
+ label := e.stringLabel(f)
+
+ if f.IsDef() {
+ x.inDefinition++
+ }
+
+ a := []adt.Conjunct{}
+ for _, cc := range c {
+ a = append(a, cc.c)
+ }
+
+ d := &ast.Field{Label: label}
+
+ top := e.frame(0)
+ if fr, ok := top.fields[f]; ok && fr.alias != "" {
+ setFieldAlias(d, fr.alias)
+ fr.node = d
+ top.fields[f] = fr
+ }
+
+ d.Value = e.mergeValues(f, field.arc, c, a...)
+
+ if f.IsDef() {
+ x.inDefinition--
+ }
+
+ if isOptional(a) {
+ d.Optional = token.Blank.Pos()
+ }
+ if x.cfg.ShowDocs {
+ docs := extractDocs(src, a)
+ ast.SetComments(d, docs)
+ }
+ if x.cfg.ShowAttributes {
+ for _, c := range a {
+ d.Attrs = extractFieldAttrs(d.Attrs, c)
+ }
+ }
+ s.Elts = append(s.Elts, d)
+ }
+ if e.hasEllipsis {
+ s.Elts = append(s.Elts, &ast.Ellipsis{})
+ }
+
+ ws := e.wrapCloseIfNecessary(s, src)
+ switch {
+ case len(e.conjuncts) == 0:
+ return ws
+
+ case len(e.structs) > 0, len(s.Elts) > 0:
+ e.conjuncts = append(e.conjuncts, ws)
+ }
+
+ return ast.NewBinExpr(token.AND, e.conjuncts...)
+}
+
+func (e *conjuncts) wrapCloseIfNecessary(s *ast.StructLit, v *adt.Vertex) ast.Expr {
+ if !e.hasEllipsis && v != nil {
+ if st, ok := v.BaseValue.(*adt.StructMarker); ok && st.NeedClose {
+ return ast.NewCall(ast.NewIdent("close"), s)
+ }
+ }
+ return s
+}
+
+// Conjuncts if for collecting values of a single vertex.
+type conjuncts struct {
+ *exporter
+ // Values is used to collect non-struct values.
+ values *adt.Vertex
+ embed []ast.Expr
+ conjuncts []ast.Expr
+ structs []*adt.StructInfo
+ fields map[adt.Feature]field
+ attrs []*ast.Attribute
+ hasEllipsis bool
+
+ // A value is a struct if it has a non-zero structs slice or if isData is
+ // set to true. Data vertices may not have conjuncts associated with them.
+ isData bool
+}
+
+func (c *conjuncts) addValueConjunct(src *adt.Vertex, env *adt.Environment, x adt.Elem) {
+ switch b, ok := x.(adt.BaseValue); {
+ case ok && src != nil && isTop(b) && !isTop(src.BaseValue):
+ // drop top
+ default:
+ c.values.AddConjunct(adt.MakeRootConjunct(env, x))
+ }
+}
+
+func (c *conjuncts) addConjunct(f adt.Feature, env *adt.Environment, n adt.Node) {
+ x := c.fields[f]
+ v := adt.MakeRootConjunct(env, n)
+ x.conjuncts = append(x.conjuncts, conjunct{
+ c: v,
+ up: c.top().upCount,
+ })
+ // x.upCounts = append(x.upCounts, c.top().upCount)
+ c.fields[f] = x
+}
+
+type field struct {
+ docs []*ast.CommentGroup
+ arc *adt.Vertex
+ conjuncts []conjunct
+}
+
+type conjunct struct {
+ c adt.Conjunct
+ up int32
+}
+
+func (e *conjuncts) addExpr(env *adt.Environment, src *adt.Vertex, x adt.Elem, isEmbed bool) {
+ switch x := x.(type) {
+ case *adt.StructLit:
+ e.top().upCount++
+
+ if e.cfg.ShowAttributes {
+ e.attrs = extractDeclAttrs(e.attrs, x.Src)
+ }
+
+ // Only add if it only has no bulk fields or elipsis.
+ if isComplexStruct(x) {
+ _, saved := e.pushFrame(nil)
+ e.embed = append(e.embed, e.adt(x, nil))
+ e.top().upCount-- // not necessary, but for proper form
+ e.popFrame(saved)
+ return
+ }
+ // Used for sorting.
+ e.structs = append(e.structs, &adt.StructInfo{StructLit: x, Env: env})
+
+ for _, d := range x.Decls {
+ var label adt.Feature
+ switch f := d.(type) {
+ case *adt.Field:
+ label = f.Label
+ case *adt.OptionalField:
+ // TODO: mark optional here.
+ label = f.Label
+ case *adt.Ellipsis:
+ e.hasEllipsis = true
+ continue
+ case adt.Expr:
+ e.addExpr(env, nil, f, true)
+ continue
+
+ // TODO: also handle dynamic fields
+ default:
+ panic(fmt.Sprintf("Unexpected type %T", d))
+ }
+ e.addConjunct(label, env, d)
+ }
+ e.top().upCount--
+
+ case adt.Value: // other values.
+ switch v := x.(type) {
+ case nil:
+ default:
+ e.addValueConjunct(src, env, x)
+
+ case *adt.Vertex:
+ if b, ok := v.BaseValue.(*adt.Bottom); ok {
+ if !b.IsIncomplete() || e.cfg.Final {
+ e.addExpr(env, v, b, false)
+ return
+ }
+ }
+
+ switch {
+ default:
+ for _, c := range v.Conjuncts {
+ e.addExpr(c.Env, v, c.Elem(), false)
+ }
+
+ case v.IsData():
+ e.structs = append(e.structs, v.Structs...)
+ e.isData = true
+
+ if y, ok := v.BaseValue.(adt.Value); ok {
+ e.addValueConjunct(src, env, y)
+ }
+
+ for _, a := range v.Arcs {
+ a.Finalize(e.ctx) // TODO: should we do this?
+
+ e.addConjunct(a.Label, env, a)
+ }
+ }
+ }
+
+ case *adt.BinaryExpr:
+ switch {
+ case x.Op == adt.AndOp && !isEmbed:
+ e.addExpr(env, src, x.X, false)
+ e.addExpr(env, src, x.Y, false)
+ case isSelfContained(x):
+ e.addValueConjunct(src, env, x)
+ default:
+ if isEmbed {
+ e.embed = append(e.embed, e.expr(x))
+ } else {
+ e.conjuncts = append(e.conjuncts, e.expr(x))
+ }
+ }
+
+ default:
+ switch {
+ case isSelfContained(x):
+ e.addValueConjunct(src, env, x)
+ case isEmbed:
+ e.embed = append(e.embed, e.expr(x))
+ default:
+ e.conjuncts = append(e.conjuncts, e.expr(x))
+ }
+ }
+}
+
+func isTop(x adt.BaseValue) bool {
+ switch v := x.(type) {
+ case *adt.Top:
+ return true
+ case *adt.BasicType:
+ return v.K == adt.TopKind
+ default:
+ return false
+ }
+}
+
+// TODO: find a better way to annotate optionality. Maybe a special conjunct
+// or store it in the field information?
+func isOptional(a []adt.Conjunct) bool {
+ if len(a) == 0 {
+ return false
+ }
+ for _, c := range a {
+ if v, ok := c.Elem().(*adt.Vertex); ok && !v.IsData() && len(v.Conjuncts) > 0 {
+ return isOptional(v.Conjuncts)
+ }
+ switch f := c.Source().(type) {
+ case nil:
+ return false
+ case *ast.Field:
+ if f.Optional == token.NoPos {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func isComplexStruct(s *adt.StructLit) bool {
+ for _, d := range s.Decls {
+ switch x := d.(type) {
+ case *adt.Field:
+ // TODO: remove this and also handle field annotation in expr().
+ // This allows structs to be merged. Ditto below.
+ if x.Src != nil {
+ if _, ok := x.Src.Label.(*ast.Alias); ok {
+ return ok
+ }
+ }
+
+ case *adt.OptionalField:
+ if x.Src != nil {
+ if _, ok := x.Src.Label.(*ast.Alias); ok {
+ return ok
+ }
+ }
+
+ case adt.Expr:
+
+ case *adt.Ellipsis:
+ if x.Value != nil {
+ return true
+ }
+
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func isSelfContained(expr adt.Elem) bool {
+ switch x := expr.(type) {
+ case *adt.BinaryExpr:
+ return isSelfContained(x.X) && isSelfContained(x.Y)
+ case *adt.UnaryExpr:
+ return isSelfContained(x.X)
+ case *adt.BoundExpr:
+ return isSelfContained(x.Expr)
+ case adt.Value:
+ return true
+ }
+ return false
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/extract.go b/vendor/cuelang.org/go/internal/core/export/extract.go
new file mode 100644
index 0000000000..0a0d7d9411
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/extract.go
@@ -0,0 +1,202 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// ExtractDoc collects documentation strings for a field.
+//
+// Comments are attached to a field with a field shorthand belong to the
+// child node. So in the following the comment is attached to field bar.
+//
+// // comment
+// foo: bar: 2
+//
+func ExtractDoc(v *adt.Vertex) (docs []*ast.CommentGroup) {
+ return extractDocs(v, v.Conjuncts)
+}
+
+func extractDocs(v *adt.Vertex, a []adt.Conjunct) (docs []*ast.CommentGroup) {
+ fields := []*ast.Field{}
+
+ // Collect docs directly related to this Vertex.
+ for _, x := range a {
+ if v, ok := x.Elem().(*adt.Vertex); ok {
+ docs = append(docs, extractDocs(v, v.Conjuncts)...)
+ continue
+ }
+
+ switch f := x.Source().(type) {
+ case *ast.Field:
+ if hasShorthandValue(f) {
+ continue
+ }
+ fields = append(fields, f)
+ for _, cg := range f.Comments() {
+ if !containsDoc(docs, cg) && cg.Doc {
+ docs = append(docs, cg)
+ }
+ }
+
+ case *ast.File:
+ if c := internal.FileComment(f); c != nil {
+ docs = append(docs, c)
+ }
+ }
+ }
+
+ if v == nil {
+ return docs
+ }
+
+ // Collect docs from parent scopes in collapsed fields.
+ for p := v.Parent; p != nil; p = p.Parent {
+
+ newFields := []*ast.Field{}
+
+ for _, x := range p.Conjuncts {
+ f, ok := x.Source().(*ast.Field)
+ if !ok || !hasShorthandValue(f) {
+ continue
+ }
+
+ nested := nestedField(f)
+ for _, child := range fields {
+ if nested == child {
+ newFields = append(newFields, f)
+ for _, cg := range f.Comments() {
+ if !containsDoc(docs, cg) && cg.Doc {
+ docs = append(docs, cg)
+ }
+ }
+ }
+ }
+ }
+
+ fields = newFields
+ }
+ return docs
+}
+
+// hasShorthandValue reports whether this field has a struct value that will
+// be rendered as a shorthand, for instance:
+//
+// f: g: 2
+//
+func hasShorthandValue(f *ast.Field) bool {
+ if f = nestedField(f); f == nil {
+ return false
+ }
+
+ // Not a regular field, but shorthand field.
+ // TODO: Should we return here? For now mimic old implementation.
+ if _, _, err := ast.LabelName(f.Label); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// nestedField returns the child field of a field shorthand.
+func nestedField(f *ast.Field) *ast.Field {
+ s, _ := f.Value.(*ast.StructLit)
+ if s == nil ||
+ len(s.Elts) != 1 ||
+ s.Lbrace != token.NoPos ||
+ s.Rbrace != token.NoPos {
+ return nil
+ }
+
+ f, _ = s.Elts[0].(*ast.Field)
+ return f
+}
+
+func containsDoc(a []*ast.CommentGroup, cg *ast.CommentGroup) bool {
+ for _, c := range a {
+ if c == cg {
+ return true
+ }
+ }
+
+ for _, c := range a {
+ if c.Text() == cg.Text() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func ExtractFieldAttrs(v *adt.Vertex) (attrs []*ast.Attribute) {
+ for _, x := range v.Conjuncts {
+ attrs = extractFieldAttrs(attrs, x)
+ }
+ return attrs
+}
+
+func extractFieldAttrs(attrs []*ast.Attribute, c adt.Conjunct) []*ast.Attribute {
+ if f, ok := c.Source().(*ast.Field); ok {
+ for _, a := range f.Attrs {
+ if !containsAttr(attrs, a) {
+ attrs = append(attrs, a)
+ }
+ }
+ }
+ return attrs
+}
+
+func ExtractDeclAttrs(v *adt.Vertex) (attrs []*ast.Attribute) {
+ for _, st := range v.Structs {
+ if src := st.StructLit; src != nil {
+ attrs = extractDeclAttrs(attrs, src.Src)
+ }
+ }
+ return attrs
+}
+
+func extractDeclAttrs(attrs []*ast.Attribute, n ast.Node) []*ast.Attribute {
+ switch x := n.(type) {
+ case nil:
+ case *ast.File:
+ info := internal.GetPackageInfo(x)
+ attrs = appendDeclAttrs(attrs, x.Decls[info.Index:])
+ case *ast.StructLit:
+ attrs = appendDeclAttrs(attrs, x.Elts)
+ }
+ return attrs
+}
+
+func appendDeclAttrs(a []*ast.Attribute, decls []ast.Decl) []*ast.Attribute {
+ for _, d := range decls {
+ if attr, ok := d.(*ast.Attribute); ok && !containsAttr(a, attr) {
+ a = append(a, attr)
+ }
+ }
+ return a
+}
+
+func containsAttr(a []*ast.Attribute, x *ast.Attribute) bool {
+ for _, e := range a {
+ if e.Text == x.Text {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/label.go b/vendor/cuelang.org/go/internal/core/export/label.go
new file mode 100644
index 0000000000..4e62d9e678
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/label.go
@@ -0,0 +1,48 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (e *exporter) stringLabel(f adt.Feature) ast.Label {
+ x := f.Index()
+ switch f.Typ() {
+ case adt.IntLabel:
+ return ast.NewLit(token.INT, strconv.Itoa(int(x)))
+
+ case adt.DefinitionLabel, adt.HiddenLabel, adt.HiddenDefinitionLabel:
+ s := f.IdentString(e.ctx)
+ return ast.NewIdent(s)
+
+ case adt.StringLabel:
+ s := e.ctx.IndexToString(int64(x))
+ if f == 0 || !ast.IsValidIdent(s) ||
+ strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_") {
+ return ast.NewLit(token.STRING, literal.Label.Quote(s))
+ }
+ fallthrough
+
+ default:
+ return ast.NewIdent(e.ctx.IndexToString(int64(x)))
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/toposort.go b/vendor/cuelang.org/go/internal/core/export/toposort.go
new file mode 100644
index 0000000000..7f68df9721
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/toposort.go
@@ -0,0 +1,190 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "sort"
+
+ "cuelang.org/go/internal/core/adt"
+)
+
+// TODO: topological sort should go arguably in a more fundamental place as it
+// may be needed to sort inputs for comprehensions.
+
+// VertexFeatures returns the feature list of v. The list may include more
+// features than for which there are arcs and also includes features for
+// optional fields. It assumes the Structs fields is properly initialized.
+func VertexFeatures(c *adt.OpContext, v *adt.Vertex) []adt.Feature {
+ sets := extractFeatures(v.Structs)
+ m := sortArcs(sets) // TODO: use for convenience.
+
+ // Add features that are not in m. This may happen when fields were
+ // dynamically created.
+ var a []adt.Feature
+ for _, arc := range v.Arcs {
+ if _, ok := m[arc.Label]; !ok {
+ a = append(a, arc.Label)
+ }
+ }
+
+ sets = extractFeatures(v.Structs)
+ if len(a) > 0 {
+ sets = append(sets, a)
+ }
+
+ a = sortedArcs(sets)
+ if adt.DebugSort > 0 {
+ adt.DebugSortFields(c, a)
+ }
+ return a
+}
+
+// func structFeatures(a []*adt.StructLit) []adt.Feature {
+// sets := extractFeatures(a)
+// return sortedArcs(sets)
+// }
+
+func (e *exporter) sortedArcs(v *adt.Vertex) (sorted []*adt.Vertex) {
+ if adt.DebugSort > 0 {
+ return v.Arcs
+ }
+
+ a := extractFeatures(v.Structs)
+ if len(a) == 0 {
+ return v.Arcs
+ }
+
+ sorted = make([]*adt.Vertex, len(v.Arcs))
+ copy(sorted, v.Arcs)
+
+ m := sortArcs(a)
+ sort.SliceStable(sorted, func(i, j int) bool {
+ if m[sorted[i].Label] == 0 {
+ return m[sorted[j].Label] != 0
+ }
+ return m[sorted[i].Label] > m[sorted[j].Label]
+ })
+
+ return sorted
+}
+
+// TODO: remove
+func (e *exporter) extractFeatures(in []*adt.StructInfo) (a [][]adt.Feature) {
+ return extractFeatures(in)
+}
+
+func extractFeatures(in []*adt.StructInfo) (a [][]adt.Feature) {
+ for _, s := range in {
+ sorted := []adt.Feature{}
+ for _, e := range s.StructLit.Decls {
+ switch x := e.(type) {
+ case *adt.Field:
+ sorted = append(sorted, x.Label)
+
+ case *adt.OptionalField:
+ sorted = append(sorted, x.Label)
+ }
+ }
+
+ // Lists with a single element may still be useful to distinguish
+ // between known and unknown fields: unknown fields are sorted last.
+ if len(sorted) > 0 {
+ a = append(a, sorted)
+ }
+ }
+ return a
+}
+
+// sortedArcs is like sortArcs, but returns the features of optional and
+// required fields in a sorted slice. Ultimately, the implementation should
+// use merge sort everywhere, and this will be the preferred method. Also,
+// when querying optional fields as well, this helps identify the optional
+// fields.
+func sortedArcs(fronts [][]adt.Feature) []adt.Feature {
+ m := sortArcs(fronts)
+ return sortedArcsFromMap(m)
+}
+
+func sortedArcsFromMap(m map[adt.Feature]int) []adt.Feature {
+ a := make([]adt.Feature, 0, len(m))
+
+ for k := range m {
+ a = append(a, k)
+ }
+
+ sort.Slice(a, func(i, j int) bool { return m[a[i]] > m[a[j]] })
+
+ return a
+}
+
+// sortArcs does a topological sort of arcs based on a variant of Kahn's
+// algorithm. See
+// https://www.geeksforgeeks.org/topological-sorting-indegree-based-solution/
+//
+// It returns a map from feature to int where the feature with the highest
+// number should be sorted first.
+func sortArcs(fronts [][]adt.Feature) map[adt.Feature]int {
+ counts := map[adt.Feature]int{}
+ for _, a := range fronts {
+ if len(a) <= 1 {
+ continue // no dependencies
+ }
+ for _, f := range a[1:] {
+ counts[f]++
+ }
+ }
+
+ // We could use a Heap instead of simple linear search here if we are
+ // concerned about the time complexity.
+
+ index := -1
+outer:
+ for {
+ lists:
+ for i, a := range fronts {
+ for len(a) > 0 {
+ f := a[0]
+ n := counts[f]
+ if n > 0 {
+ continue lists
+ }
+
+ // advance list and decrease dependency.
+ a = a[1:]
+ fronts[i] = a
+ if len(a) > 1 && counts[a[0]] > 0 {
+ counts[a[0]]--
+ }
+
+ if n == 0 { // may be head of other lists as well
+ counts[f] = index
+ index--
+ }
+ continue outer // progress
+ }
+ }
+
+ for _, a := range fronts {
+ if len(a) > 0 {
+ // Detected a cycle. Fire at will to make progress.
+ counts[a[0]] = 0
+ continue outer
+ }
+ }
+ break
+ }
+
+ return counts
+}
diff --git a/vendor/cuelang.org/go/internal/core/export/value.go b/vendor/cuelang.org/go/internal/core/export/value.go
new file mode 100644
index 0000000000..53065e9e82
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/export/value.go
@@ -0,0 +1,483 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+ "fmt"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (e *exporter) bareValue(v adt.Value) ast.Expr {
+ switch x := v.(type) {
+ case *adt.Vertex:
+ return e.vertex(x)
+ case adt.Value:
+ a := &adt.Vertex{BaseValue: x}
+ return e.vertex(a)
+ default:
+ panic("unreachable")
+ }
+ // TODO: allow a Value context wrapper.
+}
+
+// TODO: if the original value was a single reference, we could replace the
+// value with a reference in graph mode.
+
+func (e *exporter) vertex(n *adt.Vertex) (result ast.Expr) {
+ var attrs []*ast.Attribute
+ if e.cfg.ShowAttributes {
+ attrs = ExtractDeclAttrs(n)
+ }
+
+ s, saved := e.pushFrame(n.Conjuncts)
+ e.top().upCount++
+ defer func() {
+ e.top().upCount--
+ e.popFrame(saved)
+ }()
+
+ for _, c := range n.Conjuncts {
+ e.markLets(c.Expr().Source())
+ }
+
+ switch x := n.BaseValue.(type) {
+ case nil:
+ // bare
+ case *adt.StructMarker:
+ result = e.structComposite(n, attrs)
+
+ case *adt.ListMarker:
+ if e.showArcs(n) || attrs != nil {
+ result = e.structComposite(n, attrs)
+ } else {
+ result = e.listComposite(n)
+ }
+
+ case *adt.Bottom:
+ switch {
+ case e.cfg.ShowErrors && x.ChildError:
+ // TODO(perf): use precompiled arc statistics
+ if len(n.Arcs) > 0 && n.Arcs[0].Label.IsInt() && !e.showArcs(n) && attrs == nil {
+ result = e.listComposite(n)
+ } else {
+ result = e.structComposite(n, attrs)
+ }
+
+ case !x.IsIncomplete() || len(n.Conjuncts) == 0 || e.cfg.Final:
+ result = e.bottom(x)
+ }
+
+ case adt.Value:
+ if e.showArcs(n) || attrs != nil {
+ result = e.structComposite(n, attrs)
+ } else {
+ result = e.value(x, n.Conjuncts...)
+ }
+
+ default:
+ panic("unknown value")
+ }
+ if result == nil {
+ // fall back to expression mode
+ a := []ast.Expr{}
+ for _, c := range n.Conjuncts {
+ a = append(a, e.expr(c.Elem()))
+ }
+ result = ast.NewBinExpr(token.AND, a...)
+ }
+
+ if len(s.Elts) > 0 {
+ filterUnusedLets(s)
+ }
+ if result != s && len(s.Elts) > 0 {
+ // There are used let expressions within a non-struct.
+ // For now we just fall back to the original expressions.
+ result = e.adt(n, n.Conjuncts)
+ }
+
+ return result
+}
+
+// TODO: do something more principled. Best would be to have a similar
+// mechanism in ast.Ident as others do.
+func stripRefs(x ast.Expr) ast.Expr {
+ ast.Walk(x, nil, func(n ast.Node) {
+ switch x := n.(type) {
+ case *ast.Ident:
+ switch x.Node.(type) {
+ case *ast.ImportSpec:
+ default:
+ x.Node = nil
+ }
+ }
+ })
+ return x
+}
+
+func (e *exporter) value(n adt.Value, a ...adt.Conjunct) (result ast.Expr) {
+ if e.cfg.TakeDefaults {
+ n = adt.Default(n)
+ }
+ // Evaluate arc if needed?
+
+ // if e.concrete && !adt.IsConcrete(n.Value) {
+ // return e.errf("non-concrete value: %v", e.bareValue(n.Value))
+ // }
+
+ switch x := n.(type) {
+ case *adt.Bottom:
+ result = e.bottom(x)
+
+ case *adt.Null:
+ result = e.null(x)
+
+ case *adt.Bool:
+ result = e.bool(x)
+
+ case *adt.Num:
+ result = e.num(x, a)
+
+ case *adt.String:
+ result = e.string(x, a)
+
+ case *adt.Bytes:
+ result = e.bytes(x, a)
+
+ case *adt.BasicType:
+ result = e.basicType(x)
+
+ case *adt.Top:
+ result = ast.NewIdent("_")
+
+ case *adt.BoundValue:
+ result = e.boundValue(x)
+
+ case *adt.Builtin:
+ result = e.builtin(x)
+
+ case *adt.BuiltinValidator:
+ result = e.builtinValidator(x)
+
+ case *adt.Vertex:
+ result = e.vertex(x)
+
+ case *adt.Conjunction:
+ switch len(x.Values) {
+ case 0:
+ return ast.NewIdent("_")
+ case 1:
+ if e.cfg.Simplify {
+ return e.expr(x.Values[0])
+ }
+ return e.bareValue(x.Values[0])
+ }
+
+ a := []adt.Value{}
+ b := boundSimplifier{e: e}
+ for _, v := range x.Values {
+ if !e.cfg.Simplify || !b.add(v) {
+ a = append(a, v)
+ }
+ }
+
+ result = b.expr(e.ctx)
+ if result == nil {
+ a = x.Values
+ }
+
+ for _, x := range a {
+ result = wrapBin(result, e.bareValue(x), adt.AndOp)
+ }
+
+ case *adt.Disjunction:
+ a := []ast.Expr{}
+ for i, v := range x.Values {
+ var expr ast.Expr
+ if e.cfg.Simplify {
+ expr = e.bareValue(v)
+ } else {
+ expr = e.expr(v)
+ }
+ if i < x.NumDefaults {
+ expr = &ast.UnaryExpr{Op: token.MUL, X: expr}
+ }
+ a = append(a, expr)
+ }
+ result = ast.NewBinExpr(token.OR, a...)
+
+ default:
+ panic(fmt.Sprintf("unsupported type %T", x))
+ }
+
+ // TODO: Add comments from original.
+
+ return result
+}
+
+func (e *exporter) bottom(n *adt.Bottom) *ast.BottomLit {
+ err := &ast.BottomLit{}
+ if x := n.Err; x != nil {
+ msg := x.Error()
+ comment := &ast.Comment{Text: "// " + msg}
+ err.AddComment(&ast.CommentGroup{
+ Line: true,
+ Position: 2,
+ List: []*ast.Comment{comment},
+ })
+ }
+ return err
+}
+
+func (e *exporter) null(n *adt.Null) *ast.BasicLit {
+ return &ast.BasicLit{Kind: token.NULL, Value: "null"}
+}
+
+func (e *exporter) bool(n *adt.Bool) (b *ast.BasicLit) {
+ return ast.NewBool(n.B)
+}
+
+func extractBasic(a []adt.Conjunct) *ast.BasicLit {
+ for _, v := range a {
+ if b, ok := v.Source().(*ast.BasicLit); ok {
+ return &ast.BasicLit{Kind: b.Kind, Value: b.Value}
+ }
+ }
+ return nil
+}
+
+func (e *exporter) num(n *adt.Num, orig []adt.Conjunct) *ast.BasicLit {
+ // TODO: take original formatting into account.
+ if b := extractBasic(orig); b != nil {
+ return b
+ }
+ kind := token.FLOAT
+ if n.K&adt.IntKind != 0 {
+ kind = token.INT
+ }
+ s := n.X.String()
+ if kind == token.FLOAT && !strings.ContainsAny(s, "eE.") {
+ s += "."
+ }
+ return &ast.BasicLit{Kind: kind, Value: s}
+}
+
+func (e *exporter) string(n *adt.String, orig []adt.Conjunct) *ast.BasicLit {
+ // TODO: take original formatting into account.
+ if b := extractBasic(orig); b != nil {
+ return b
+ }
+ s := literal.String.WithOptionalTabIndent(len(e.stack)).Quote(n.Str)
+ return &ast.BasicLit{
+ Kind: token.STRING,
+ Value: s,
+ }
+}
+
+func (e *exporter) bytes(n *adt.Bytes, orig []adt.Conjunct) *ast.BasicLit {
+ // TODO: take original formatting into account.
+ if b := extractBasic(orig); b != nil {
+ return b
+ }
+ s := literal.Bytes.WithOptionalTabIndent(len(e.stack)).Quote(string(n.B))
+ return &ast.BasicLit{
+ Kind: token.STRING,
+ Value: s,
+ }
+}
+
+func (e *exporter) basicType(n *adt.BasicType) ast.Expr {
+ // TODO: allow multi-bit types?
+ return ast.NewIdent(n.K.String())
+}
+
+func (e *exporter) boundValue(n *adt.BoundValue) ast.Expr {
+ return &ast.UnaryExpr{Op: n.Op.Token(), X: e.value(n.Value)}
+}
+
+func (e *exporter) builtin(x *adt.Builtin) ast.Expr {
+ if x.Package == 0 {
+ return ast.NewIdent(x.Name)
+ }
+ spec := ast.NewImport(nil, x.Package.StringValue(e.index))
+ info, _ := astutil.ParseImportSpec(spec)
+ ident := ast.NewIdent(info.Ident)
+ ident.Node = spec
+ return ast.NewSel(ident, x.Name)
+}
+
+func (e *exporter) builtinValidator(n *adt.BuiltinValidator) ast.Expr {
+ call := ast.NewCall(e.builtin(n.Builtin))
+ for _, a := range n.Args {
+ call.Args = append(call.Args, e.value(a))
+ }
+ return call
+}
+
+func (e *exporter) listComposite(v *adt.Vertex) ast.Expr {
+ l := &ast.ListLit{}
+ for _, a := range v.Arcs {
+ if !a.Label.IsInt() {
+ continue
+ }
+ elem := e.vertex(a)
+
+ docs := ExtractDoc(a)
+ ast.SetComments(elem, docs)
+
+ l.Elts = append(l.Elts, elem)
+ }
+ m, ok := v.BaseValue.(*adt.ListMarker)
+ if !e.cfg.TakeDefaults && ok && m.IsOpen {
+ ellipsis := &ast.Ellipsis{}
+ typ := &adt.Vertex{
+ Parent: v,
+ Label: adt.AnyIndex,
+ }
+ v.MatchAndInsert(e.ctx, typ)
+ typ.Finalize(e.ctx)
+ if typ.Kind() != adt.TopKind {
+ ellipsis.Type = e.value(typ)
+ }
+
+ l.Elts = append(l.Elts, ellipsis)
+ }
+ return l
+}
+
+func (e exporter) showArcs(v *adt.Vertex) bool {
+ p := e.cfg
+ if !p.ShowHidden && !p.ShowDefinitions {
+ return false
+ }
+ for _, a := range v.Arcs {
+ switch {
+ case a.Label.IsDef() && p.ShowDefinitions:
+ return true
+ case a.Label.IsHidden() && p.ShowHidden:
+ return true
+ }
+ }
+ return false
+}
+
+func (e *exporter) structComposite(v *adt.Vertex, attrs []*ast.Attribute) ast.Expr {
+ s := e.top().scope
+
+ showRegular := false
+ switch x := v.BaseValue.(type) {
+ case *adt.StructMarker:
+ showRegular = true
+ case *adt.ListMarker:
+ // As lists may be long, put them at the end.
+ defer e.addEmbed(e.listComposite(v))
+ case *adt.Bottom:
+ if !e.cfg.ShowErrors || !x.ChildError {
+ // Should not be reachable, but just in case. The output will be
+ // correct.
+ e.addEmbed(e.value(x))
+ return s
+ }
+ // Always also show regular fields, even when list, as we are in
+ // debugging mode.
+ showRegular = true
+ // TODO(perf): do something better
+ for _, a := range v.Arcs {
+ if a.Label.IsInt() {
+ defer e.addEmbed(e.listComposite(v))
+ break
+ }
+ }
+
+ case adt.Value:
+ e.addEmbed(e.value(x))
+ }
+
+ for _, a := range attrs {
+ s.Elts = append(s.Elts, a)
+ }
+
+ p := e.cfg
+ for _, label := range VertexFeatures(e.ctx, v) {
+ show := false
+ switch label.Typ() {
+ case adt.StringLabel:
+ show = showRegular
+ case adt.IntLabel:
+ continue
+ case adt.DefinitionLabel:
+ show = p.ShowDefinitions
+ case adt.HiddenLabel, adt.HiddenDefinitionLabel:
+ show = p.ShowHidden && label.PkgID(e.ctx) == e.pkgID
+ }
+ if !show {
+ continue
+ }
+
+ f := &ast.Field{Label: e.stringLabel(label)}
+
+ e.addField(label, f, f.Value)
+
+ if label.IsDef() {
+ e.inDefinition++
+ }
+
+ arc := v.Lookup(label)
+ switch {
+ case arc == nil:
+ if !p.ShowOptional {
+ continue
+ }
+ f.Optional = token.NoSpace.Pos()
+
+ arc = &adt.Vertex{Label: label}
+ v.MatchAndInsert(e.ctx, arc)
+ if len(arc.Conjuncts) == 0 {
+ continue
+ }
+
+ // fall back to expression mode.
+ f.Value = stripRefs(e.expr(arc))
+
+ // TODO: remove use of stripRefs.
+ // f.Value = e.expr(arc)
+
+ default:
+ f.Value = e.vertex(arc)
+ }
+
+ if label.IsDef() {
+ e.inDefinition--
+ }
+
+ if p.ShowAttributes {
+ f.Attrs = ExtractFieldAttrs(arc)
+ }
+
+ if p.ShowDocs {
+ docs := ExtractDoc(arc)
+ ast.SetComments(f, docs)
+ }
+
+ s.Elts = append(s.Elts, f)
+ }
+
+ return s
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/build.go b/vendor/cuelang.org/go/internal/core/runtime/build.go
new file mode 100644
index 0000000000..d3ecac7d8b
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/build.go
@@ -0,0 +1,150 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/compile"
+)
+
+type Config struct {
+ Runtime *Runtime
+ Filename string
+ ImportPath string
+
+ compile.Config
+}
+
+// Build builds b and all its transitive dependencies, insofar as they have
+// not been built yet.
+func (x *Runtime) Build(cfg *Config, b *build.Instance) (v *adt.Vertex, errs errors.Error) {
+ if err := b.Complete(); err != nil {
+ return nil, b.Err
+ }
+ if v := x.getNodeFromInstance(b); v != nil {
+ return v, b.Err
+ }
+ // TODO: clear cache of old implementation.
+ // if s := b.ImportPath; s != "" {
+ // // Use cached result, if available.
+ // if v, err := x.LoadImport(s); v != nil || err != nil {
+ // return v, err
+ // }
+ // }
+
+ errs = b.Err
+
+ // Build transitive dependencies.
+ for _, file := range b.Files {
+ file.VisitImports(func(d *ast.ImportDecl) {
+ for _, s := range d.Specs {
+ errs = errors.Append(errs, x.buildSpec(cfg, b, s))
+ }
+ })
+ }
+
+ err := x.ResolveFiles(b)
+ errs = errors.Append(errs, err)
+
+ var cc *compile.Config
+ if cfg != nil {
+ cc = &cfg.Config
+ }
+ if cfg != nil && cfg.ImportPath != "" {
+ b.ImportPath = cfg.ImportPath
+ b.PkgName = astutil.ImportPathName(b.ImportPath)
+ }
+ v, err = compile.Files(cc, x, b.ID(), b.Files...)
+ errs = errors.Append(errs, err)
+
+ if errs != nil {
+ v = adt.ToVertex(&adt.Bottom{Err: errs})
+ b.Err = errs
+ }
+
+ x.AddInst(b.ImportPath, v, b)
+
+ return v, errs
+}
+
+func dummyLoad(token.Pos, string) *build.Instance { return nil }
+
+func (r *Runtime) Compile(cfg *Config, source interface{}) (*adt.Vertex, *build.Instance) {
+ ctx := build.NewContext()
+ var filename string
+ if cfg != nil && cfg.Filename != "" {
+ filename = cfg.Filename
+ }
+ p := ctx.NewInstance(filename, dummyLoad)
+ if err := p.AddFile(filename, source); err != nil {
+ return nil, p
+ }
+ v, _ := r.Build(cfg, p)
+ return v, p
+}
+
+func (r *Runtime) CompileFile(cfg *Config, file *ast.File) (*adt.Vertex, *build.Instance) {
+ ctx := build.NewContext()
+ filename := file.Filename
+ if cfg != nil && cfg.Filename != "" {
+ filename = cfg.Filename
+ }
+ p := ctx.NewInstance(filename, dummyLoad)
+ err := p.AddSyntax(file)
+ if err != nil {
+ return nil, p
+ }
+ _, p.PkgName, _ = internal.PackageInfo(file)
+ v, _ := r.Build(cfg, p)
+ return v, p
+}
+
+func (x *Runtime) buildSpec(cfg *Config, b *build.Instance, spec *ast.ImportSpec) (errs errors.Error) {
+ info, err := astutil.ParseImportSpec(spec)
+ if err != nil {
+ return errors.Promote(err, "invalid import path")
+ }
+
+ pkg := b.LookupImport(info.ID)
+ if pkg == nil {
+ if strings.Contains(info.ID, ".") {
+ return errors.Newf(spec.Pos(),
+ "package %q imported but not defined in %s",
+ info.ID, b.ImportPath)
+ } else if x.index.builtinPaths[info.ID] == nil {
+ return errors.Newf(spec.Pos(),
+ "builtin package %q undefined", info.ID)
+ }
+ return nil
+ }
+
+ if v := x.getNodeFromInstance(pkg); v != nil {
+ return pkg.Err
+ }
+
+ if _, err := x.Build(cfg, pkg); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/errors.go b/vendor/cuelang.org/go/internal/core/runtime/errors.go
new file mode 100644
index 0000000000..e98cddac22
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/errors.go
@@ -0,0 +1,52 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+var _ errors.Error = &nodeError{}
+
+// A nodeError is an error associated with processing an AST node.
+type nodeError struct {
+ path []string // optional
+ n ast.Node
+
+ errors.Message
+}
+
+func (n *nodeError) Error() string {
+ return errors.String(n)
+}
+
+func nodeErrorf(n ast.Node, format string, args ...interface{}) *nodeError {
+ return &nodeError{
+ n: n,
+ Message: errors.NewMessage(format, args),
+ }
+}
+
+func (e *nodeError) Position() token.Pos {
+ return e.n.Pos()
+}
+
+func (e *nodeError) InputPositions() []token.Pos { return nil }
+
+func (e *nodeError) Path() []string {
+ return e.path
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/go.go b/vendor/cuelang.org/go/internal/core/runtime/go.go
new file mode 100644
index 0000000000..4b84b3fc22
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/go.go
@@ -0,0 +1,53 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "reflect"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (x *Runtime) StoreType(t reflect.Type, src ast.Expr, expr adt.Expr) {
+ if expr == nil {
+ x.index.StoreType(t, src)
+ } else {
+ x.index.StoreType(t, expr)
+ }
+}
+
+func (x *Runtime) LoadType(t reflect.Type) (src ast.Expr, expr adt.Expr, ok bool) {
+ v, ok := x.index.LoadType(t)
+ if ok {
+ switch x := v.(type) {
+ case ast.Expr:
+ return x, nil, true
+ case adt.Expr:
+ src, _ = x.Source().(ast.Expr)
+ return src, x, true
+ }
+ }
+ return nil, nil, false
+}
+
+func (x *index) StoreType(t reflect.Type, v interface{}) {
+ x.typeCache.Store(t, v)
+}
+
+func (x *index) LoadType(t reflect.Type) (v interface{}, ok bool) {
+ v, ok = x.typeCache.Load(t)
+ return v, ok
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/imports.go b/vendor/cuelang.org/go/internal/core/runtime/imports.go
new file mode 100644
index 0000000000..7903a3639a
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/imports.go
@@ -0,0 +1,150 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "path"
+ "sync"
+
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal/core/adt"
+)
+
+type PackageFunc func(ctx adt.Runtime) (*adt.Vertex, errors.Error)
+
+func RegisterBuiltin(importPath string, f PackageFunc) {
+ sharedIndex.RegisterBuiltin(importPath, f)
+}
+
+func (x *index) RegisterBuiltin(importPath string, f PackageFunc) {
+ if x.builtinPaths == nil {
+ x.builtinPaths = map[string]PackageFunc{}
+ x.builtinShort = map[string]string{}
+ }
+ x.builtinPaths[importPath] = f
+ base := path.Base(importPath)
+ if _, ok := x.builtinShort[base]; ok {
+ importPath = "" // Don't allow ambiguous base paths.
+ }
+ x.builtinShort[base] = importPath
+}
+
+var SharedRuntime = &Runtime{index: sharedIndex}
+
+// BuiltinPackagePath converts a short-form builtin package identifier to its
+// full path or "" if this doesn't exist.
+func (x *Runtime) BuiltinPackagePath(path string) string {
+ return x.index.shortBuiltinToPath(path)
+}
+
+// sharedIndex is used for indexing builtins and any other labels common to
+// all instances.
+var sharedIndex = newIndex()
+
+// index maps conversions from label names to internal codes.
+//
+// All instances belonging to the same package should share this index.
+type index struct {
+ // lock is used to guard imports-related maps.
+ // TODO: makes these per cuecontext.
+ lock sync.RWMutex
+ imports map[*adt.Vertex]*build.Instance
+ importsByPath map[string]*adt.Vertex
+ importsByBuild map[*build.Instance]*adt.Vertex
+
+ // These are initialized during Go package initialization time and do not
+ // need to be guarded.
+ builtinPaths map[string]PackageFunc // Full path
+ builtinShort map[string]string // Commandline shorthand
+
+ typeCache sync.Map // map[reflect.Type]evaluated
+}
+
+func newIndex() *index {
+ i := &index{
+ imports: map[*adt.Vertex]*build.Instance{},
+ importsByPath: map[string]*adt.Vertex{},
+ importsByBuild: map[*build.Instance]*adt.Vertex{},
+ }
+ return i
+}
+
+func (x *index) shortBuiltinToPath(id string) string {
+ if x == nil || x.builtinPaths == nil {
+ return ""
+ }
+ return x.builtinShort[id]
+}
+
+func (r *Runtime) AddInst(path string, key *adt.Vertex, p *build.Instance) {
+ r.index.lock.Lock()
+ defer r.index.lock.Unlock()
+
+ x := r.index
+ if key == nil {
+ panic("key must not be nil")
+ }
+ x.imports[key] = p
+ x.importsByBuild[p] = key
+ if path != "" {
+ x.importsByPath[path] = key
+ }
+}
+
+func (r *Runtime) GetInstanceFromNode(key *adt.Vertex) *build.Instance {
+ r.index.lock.RLock()
+ defer r.index.lock.RUnlock()
+
+ return r.index.imports[key]
+}
+
+func (r *Runtime) getNodeFromInstance(key *build.Instance) *adt.Vertex {
+ r.index.lock.RLock()
+ defer r.index.lock.RUnlock()
+
+ return r.index.importsByBuild[key]
+}
+
+func (r *Runtime) LoadImport(importPath string) *adt.Vertex {
+ r.index.lock.Lock()
+ defer r.index.lock.Unlock()
+
+ x := r.index
+
+ key := x.importsByPath[importPath]
+ if key != nil {
+ return key
+ }
+
+ if x.builtinPaths != nil {
+ if f := x.builtinPaths[importPath]; f != nil {
+ p, err := f(r)
+ if err != nil {
+ return adt.ToVertex(&adt.Bottom{Err: err})
+ }
+ inst := &build.Instance{
+ ImportPath: importPath,
+ PkgName: path.Base(importPath),
+ }
+ x.imports[p] = inst
+ x.importsByPath[importPath] = p
+ x.importsByBuild[inst] = p
+ return p
+ }
+ }
+
+ return key
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/index.go b/vendor/cuelang.org/go/internal/core/runtime/index.go
new file mode 100644
index 0000000000..a50e37808c
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/index.go
@@ -0,0 +1,93 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "sync"
+
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (r *Runtime) IndexToString(i int64) string {
+ return r.index.IndexToString(i)
+}
+
+func (r *Runtime) StringToIndex(s string) int64 {
+ return getKey(s)
+}
+
+func (r *Runtime) LabelStr(l adt.Feature) string {
+ return l.IdentString(r)
+}
+
+func (r *Runtime) StrLabel(str string) adt.Feature {
+ return r.Label(str, false)
+}
+
+func (r *Runtime) Label(s string, isIdent bool) adt.Feature {
+ index := r.StringToIndex(s)
+ typ := adt.StringLabel
+ if isIdent {
+ switch {
+ case internal.IsDef(s) && internal.IsHidden(s):
+ typ = adt.HiddenDefinitionLabel
+ case internal.IsDef(s):
+ typ = adt.DefinitionLabel
+ case internal.IsHidden(s):
+ typ = adt.HiddenLabel
+ }
+ }
+ f, _ := adt.MakeLabel(nil, index, typ)
+ return f
+}
+
+// TODO: move to Runtime as fields.
+var (
+ labelMap = map[string]int{}
+ labels = make([]string, 0, 1000)
+ mutex sync.RWMutex
+)
+
+func init() {
+ // Ensure label 0 is assigned to _.
+ getKey("_")
+}
+
+func getKey(s string) int64 {
+ mutex.RLock()
+ p, ok := labelMap[s]
+ mutex.RUnlock()
+ if ok {
+ return int64(p)
+ }
+ mutex.Lock()
+ defer mutex.Unlock()
+ p, ok = labelMap[s]
+ if ok {
+ return int64(p)
+ }
+ p = len(labels)
+ labels = append(labels, s)
+ labelMap[s] = p
+ return int64(p)
+}
+
+func (x *index) IndexToString(i int64) string {
+ mutex.RLock()
+ s := labels[i]
+ mutex.RUnlock()
+ return s
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/resolve.go b/vendor/cuelang.org/go/internal/core/runtime/resolve.go
new file mode 100644
index 0000000000..59ab5f224d
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/resolve.go
@@ -0,0 +1,169 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "path"
+ "strconv"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal"
+)
+
+// TODO(resolve): this is also done in compile, do we need both?
+func (r *Runtime) ResolveFiles(p *build.Instance) (errs errors.Error) {
+ idx := r.index
+
+ // Link top-level declarations. As top-level entries get unified, an entry
+ // may be linked to any top-level entry of any of the files.
+ allFields := map[string]ast.Node{}
+ for _, f := range p.Files {
+ if p := internal.GetPackageInfo(f); p.IsAnonymous() {
+ continue
+ }
+ for _, d := range f.Decls {
+ if f, ok := d.(*ast.Field); ok && f.Value != nil {
+ if ident, ok := f.Label.(*ast.Ident); ok {
+ allFields[ident.Name] = f.Value
+ }
+ }
+ }
+ }
+ for _, f := range p.Files {
+ if p := internal.GetPackageInfo(f); p.IsAnonymous() {
+ continue
+ }
+ err := resolveFile(idx, f, p, allFields)
+ errs = errors.Append(errs, err)
+ }
+ return errs
+}
+
+func resolveFile(
+ idx *index,
+ f *ast.File,
+ p *build.Instance,
+ allFields map[string]ast.Node,
+) errors.Error {
+ unresolved := map[string][]*ast.Ident{}
+ for _, u := range f.Unresolved {
+ unresolved[u.Name] = append(unresolved[u.Name], u)
+ }
+ fields := map[string]ast.Node{}
+ for _, d := range f.Decls {
+ if f, ok := d.(*ast.Field); ok && f.Value != nil {
+ if ident, ok := f.Label.(*ast.Ident); ok {
+ fields[ident.Name] = d
+ }
+ }
+ }
+ var errs errors.Error
+
+ specs := []*ast.ImportSpec{}
+
+ for _, spec := range f.Imports {
+ id, err := strconv.Unquote(spec.Path.Value)
+ if err != nil {
+ continue // quietly ignore the error
+ }
+ name := path.Base(id)
+ if imp := p.LookupImport(id); imp != nil {
+ name = imp.PkgName
+ } else if _, ok := idx.builtinPaths[id]; !ok {
+ errs = errors.Append(errs,
+ nodeErrorf(spec, "package %q not found", id))
+ continue
+ }
+ if spec.Name != nil {
+ name = spec.Name.Name
+ }
+ if n, ok := fields[name]; ok {
+ errs = errors.Append(errs, nodeErrorf(spec,
+ "%s redeclared as imported package name\n"+
+ "\tprevious declaration at %v", name, lineStr(idx, n)))
+ continue
+ }
+ fields[name] = spec
+ used := false
+ for _, u := range unresolved[name] {
+ used = true
+ u.Node = spec
+ }
+ if !used {
+ specs = append(specs, spec)
+ }
+ }
+
+ // Verify each import is used.
+ if len(specs) > 0 {
+ // Find references to imports. This assumes that identifiers in labels
+ // are not resolved or that such errors are caught elsewhere.
+ ast.Walk(f, nil, func(n ast.Node) {
+ if x, ok := n.(*ast.Ident); ok {
+ // As we also visit labels, most nodes will be nil.
+ if x.Node == nil {
+ return
+ }
+ for i, s := range specs {
+ if s == x.Node {
+ specs[i] = nil
+ return
+ }
+ }
+ }
+ })
+
+ // Add errors for unused imports.
+ for _, spec := range specs {
+ if spec == nil {
+ continue
+ }
+ if spec.Name == nil {
+ errs = errors.Append(errs, nodeErrorf(spec,
+ "imported and not used: %s", spec.Path.Value))
+ } else {
+ errs = errors.Append(errs, nodeErrorf(spec,
+ "imported and not used: %s as %s", spec.Path.Value, spec.Name))
+ }
+ }
+ }
+
+ k := 0
+ for _, u := range f.Unresolved {
+ if u.Node != nil {
+ continue
+ }
+ if n, ok := allFields[u.Name]; ok {
+ u.Node = n
+ u.Scope = f
+ continue
+ }
+ f.Unresolved[k] = u
+ k++
+ }
+ f.Unresolved = f.Unresolved[:k]
+ // TODO: also need to resolve types.
+ // if len(f.Unresolved) > 0 {
+ // n := f.Unresolved[0]
+ // return ctx.mkErr(newBase(n), "unresolved reference %s", n.Name)
+ // }
+ return errs
+}
+
+func lineStr(idx *index, n ast.Node) string {
+ return n.Pos().String()
+}
diff --git a/vendor/cuelang.org/go/internal/core/runtime/runtime.go b/vendor/cuelang.org/go/internal/core/runtime/runtime.go
new file mode 100644
index 0000000000..f456980773
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/runtime/runtime.go
@@ -0,0 +1,57 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "cuelang.org/go/cue/build"
+)
+
+// A Runtime maintains data structures for indexing and resuse for evaluation.
+type Runtime struct {
+ index *index
+
+ loaded map[*build.Instance]interface{}
+}
+
+func (r *Runtime) SetBuildData(b *build.Instance, x interface{}) {
+ r.loaded[b] = x
+}
+
+func (r *Runtime) BuildData(b *build.Instance) (x interface{}, ok bool) {
+ x, ok = r.loaded[b]
+ return x, ok
+}
+
+// New creates a new Runtime. The builtins registered with RegisterBuiltin
+// are available for
+func New() *Runtime {
+ r := &Runtime{}
+ r.Init()
+ return r
+}
+
+func (r *Runtime) Init() {
+ if r.index != nil {
+ return
+ }
+ r.index = newIndex()
+
+ // TODO: the builtin-specific instances will ultimately also not be
+ // shared by indexes.
+ r.index.builtinPaths = sharedIndex.builtinPaths
+ r.index.builtinShort = sharedIndex.builtinShort
+
+ r.loaded = map[*build.Instance]interface{}{}
+}
diff --git a/vendor/cuelang.org/go/internal/core/subsume/structural.go b/vendor/cuelang.org/go/internal/core/subsume/structural.go
new file mode 100644
index 0000000000..450a03e186
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/subsume/structural.go
@@ -0,0 +1,280 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package subsume
+
+// TODO: structural subsumption has not yet been implemented.
+
+import "cuelang.org/go/internal/core/adt"
+
+func (s *subsumer) subsumes(gt, lt adt.Conjunct) bool {
+ if gt == lt {
+ return true
+ }
+
+ // First try evaluating at the value level.
+ x, _ := gt.Expr().(adt.Value)
+ y, _ := lt.Expr().(adt.Value)
+ if x == nil {
+ // Fall back to structural.
+ return s.structural(gt, lt)
+ }
+ if y == nil {
+ return false
+ }
+
+ return s.values(x, y)
+}
+
+func (s *subsumer) conjunct(gt, lt adt.Conjunct) bool {
+ return false
+}
+
+func (s *subsumer) c(env *adt.Environment, x adt.Expr) adt.Conjunct {
+ return adt.MakeRootConjunct(env, x)
+}
+
+func isBottomConjunct(c adt.Conjunct) bool {
+ b, _ := c.Expr().(*adt.Bottom)
+ return b != nil
+}
+
+func (s *subsumer) node(env *adt.Environment, up int32) *adt.Vertex {
+ for ; up != 0; up-- {
+ env = env.Up
+ }
+ return env.Vertex
+}
+
+func (s *subsumer) structural(a, b adt.Conjunct) bool {
+ if y, ok := b.Expr().(*adt.LetReference); ok {
+ return s.conjunct(a, s.c(b.Env, y.X))
+ }
+ if isBottomConjunct(b) {
+ return true
+ }
+
+ switch x := a.Expr().(type) {
+ case *adt.DisjunctionExpr:
+
+ case *adt.StructLit:
+ case *adt.ListLit:
+
+ case *adt.FieldReference:
+ if y, ok := b.Elem().(*adt.FieldReference); ok && x.Label == y.Label {
+ if s.node(a.Env, x.UpCount) == s.node(b.Env, y.UpCount) {
+ return true
+ }
+ }
+
+ case *adt.LabelReference:
+ if y, ok := b.Elem().(*adt.LabelReference); ok {
+ if s.node(a.Env, x.UpCount) == s.node(b.Env, y.UpCount) {
+ return true
+ }
+ }
+
+ case *adt.DynamicReference:
+ if y, ok := b.Elem().(*adt.FieldReference); ok {
+ if s.node(a.Env, x.UpCount) == s.node(b.Env, y.UpCount) {
+ return true
+ }
+ }
+
+ case *adt.ImportReference:
+ if y, ok := b.Elem().(*adt.ImportReference); ok &&
+ x.ImportPath == y.ImportPath {
+ return true
+ }
+
+ case *adt.LetReference:
+ return s.conjunct(s.c(a.Env, x.X), b)
+
+ case *adt.SelectorExpr:
+ if y, ok := a.Elem().(*adt.SelectorExpr); ok &&
+ x.Sel == y.Sel &&
+ s.conjunct(s.c(a.Env, x.X), s.c(b.Env, y.X)) {
+ return true
+ }
+
+ case *adt.IndexExpr:
+ if y, ok := b.Elem().(*adt.IndexExpr); ok &&
+ s.conjunct(s.c(a.Env, x.X), s.c(b.Env, y.X)) &&
+ s.conjunct(s.c(a.Env, x.Index), s.c(b.Env, y.Index)) {
+ return true
+ }
+
+ case *adt.SliceExpr:
+ if r, ok := b.Elem().(*adt.SliceExpr); ok &&
+ s.conjunct(s.c(a.Env, x.X), s.c(b.Env, r.X)) &&
+ s.conjunct(s.c(a.Env, x.Lo), s.c(b.Env, r.Lo)) &&
+ s.conjunct(s.c(a.Env, x.Hi), s.c(b.Env, r.Hi)) {
+ return true
+ }
+
+ case *adt.Interpolation:
+ switch y := b.Elem().(type) {
+ case *adt.String:
+ // Be conservative if not ground.
+ s.inexact = true
+
+ case *adt.Interpolation:
+ // structural equivalence
+ if len(x.Parts) != len(y.Parts) {
+ return false
+ }
+ for i, p := range x.Parts {
+ if !s.conjunct(s.c(a.Env, p), s.c(b.Env, y.Parts[i])) {
+ return false
+ }
+ }
+ return true
+ }
+
+ case *adt.BoundExpr:
+ if y, ok := b.Elem().(*adt.BoundExpr); ok && x.Op == y.Op {
+ return s.conjunct(s.c(a.Env, x.Expr), s.c(b.Env, y.Expr))
+ }
+
+ case *adt.UnaryExpr:
+ if y, ok := b.Elem().(*adt.UnaryExpr); ok && x.Op == y.Op {
+ return s.conjunct(s.c(a.Env, x.X), s.c(b.Env, y.X))
+ }
+
+ case *adt.BinaryExpr:
+ if y, ok := b.Elem().(*adt.BinaryExpr); ok && x.Op == y.Op {
+ return s.conjunct(s.c(a.Env, x.X), s.c(b.Env, y.X)) &&
+ s.conjunct(s.c(a.Env, x.Y), s.c(b.Env, y.Y))
+ }
+
+ case *adt.CallExpr:
+ if y, ok := b.Elem().(*adt.CallExpr); ok {
+ if len(x.Args) != len(y.Args) {
+ return false
+ }
+ for i, arg := range x.Args {
+ if !s.conjunct(s.c(a.Env, arg), s.c(b.Env, y.Args[i])) {
+ return false
+ }
+ }
+ return s.conjunct(s.c(a.Env, x.Fun), s.c(b.Env, y.Fun))
+ }
+ }
+ return false
+}
+
+func (s *subsumer) structLit(
+ ea *adt.Environment, sa *adt.StructLit,
+ eb *adt.Environment, sb *adt.StructLit) bool {
+
+ // Create index of instance fields.
+ ca := newCollatedDecls()
+ ca.collate(ea, sa)
+
+ if ca.yielders != nil || ca.dynamic != nil {
+ // TODO: we could do structural comparison of comprehensions
+ // in many cases. For instance, an if clause would subsume
+ // structurally if it subsumes any of the if clauses in sb.
+ s.inexact = true
+ return false
+ }
+
+ cb := newCollatedDecls()
+ cb.collate(eb, sb)
+
+ if ca.hasOptional && !s.IgnoreOptional {
+ // TODO: same argument here as for comprehensions. This could
+ // be made to work.
+ if ca.pattern != nil || ca.dynamic != nil {
+ s.inexact = true
+ return false
+ }
+
+ // for f, b := range cb.fields {
+ // if !b.required || f.IsDef() {
+ // continue
+ // }
+ // name := ctx.LabelStr(b.Label)
+ // arg := &stringLit{x.baseValue, name, nil}
+ // u, _ := x.optionals.constraint(ctx, arg)
+ // if u != nil && !s.subsumes(u, b.v) {
+ // return false
+ // }
+ // }
+
+ }
+
+ return false
+
+}
+
+// collatedDecls is used to compute the structural subsumption of two
+// struct literals.
+type collatedDecls struct {
+ fields map[adt.Feature]field
+ yielders []adt.Yielder
+ pattern []*adt.BulkOptionalField
+ dynamic []*adt.DynamicField
+ values []adt.Expr
+ additional []*adt.Ellipsis
+ isOpen bool
+ hasOptional bool
+}
+
+func newCollatedDecls() *collatedDecls {
+ return &collatedDecls{fields: map[adt.Feature]field{}}
+}
+
+type field struct {
+ required bool
+ conjuncts []adt.Conjunct
+}
+
+func (c *collatedDecls) collate(env *adt.Environment, s *adt.StructLit) {
+ for _, d := range s.Decls {
+ switch x := d.(type) {
+ case *adt.Field:
+ e := c.fields[x.Label]
+ e.required = true
+ e.conjuncts = append(e.conjuncts, adt.MakeRootConjunct(env, x))
+ c.fields[x.Label] = e
+
+ case *adt.OptionalField:
+ e := c.fields[x.Label]
+ e.conjuncts = append(e.conjuncts, adt.MakeRootConjunct(env, x))
+ c.fields[x.Label] = e
+ c.hasOptional = true
+
+ case *adt.BulkOptionalField:
+ c.pattern = append(c.pattern, x)
+ c.hasOptional = true
+
+ case *adt.DynamicField:
+ c.dynamic = append(c.dynamic, x)
+ c.hasOptional = true
+
+ case *adt.Ellipsis:
+ c.isOpen = true
+ c.additional = append(c.additional, x)
+
+ case *adt.Comprehension:
+ c.yielders = append(c.yielders, x.Clauses)
+
+ case *adt.LetClause:
+ c.yielders = append(c.yielders, x)
+
+ case *adt.ValueClause:
+ }
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/subsume/subsume.go b/vendor/cuelang.org/go/internal/core/subsume/subsume.go
new file mode 100644
index 0000000000..82f522457b
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/subsume/subsume.go
@@ -0,0 +1,144 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package subsume defines various subsumption relations.
+package subsume
+
+import (
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal"
+ "cuelang.org/go/internal/core/adt"
+)
+
+// Profile configures the type of subsumption. One should typically use one
+// of the preconfigured profiles.
+type Profile struct {
+ // Final indicates subsumption should only consider fields that are relevant
+ // to data mode, and ignore definitions, hidden fields, pattern constraints
+ // and additional constraints.
+ Final bool
+
+ // Defaults indicate that default values should be used for the subsumed
+ // value.
+ Defaults bool
+
+ // LeftDefaults indicates that the default value of the subsuming value
+ // needs to be taken. This is necessary for simplifications like trim
+ // and simplifying disjunctions.
+ LeftDefault bool
+
+ // Ignore optional fields.
+ IgnoreOptional bool
+
+ // IgnoreClosedness ignores closedness of structs and is used for comparing
+ // APIs.
+ IgnoreClosedness bool
+}
+
+var Simplify = Profile{
+ LeftDefault: true,
+}
+
+var CUE = Profile{}
+
+// Final checks subsumption interpreting the subsumed value as data.
+var Final = Profile{
+ Final: true,
+ Defaults: true,
+}
+
+// FinalOpen exists as an artifact of the old API. One should probably not use
+// this.
+var FinalOpen = Profile{
+ Final: true,
+ Defaults: true,
+ IgnoreClosedness: true,
+}
+
+// API is subsumption used for APIs.
+var API = Profile{
+ IgnoreClosedness: true,
+}
+
+// Value subsumes two values based on their logical (evaluated) values.
+func Value(ctx *adt.OpContext, a, b adt.Value) errors.Error {
+ return CUE.Value(ctx, a, b)
+}
+
+func (p *Profile) Value(ctx *adt.OpContext, a, b adt.Value) errors.Error {
+ s := subsumer{ctx: ctx, Profile: *p}
+ if !s.values(a, b) {
+ return s.getError()
+ }
+ return nil // ignore errors here even if there are some.
+}
+
+// Check reports whether b is an instance of a.
+func (p *Profile) Check(ctx *adt.OpContext, a, b adt.Value) bool {
+ s := subsumer{ctx: ctx, Profile: *p}
+ return s.values(a, b)
+}
+
+func isBottom(x adt.Node) bool {
+ b, _ := x.(*adt.Bottom)
+ return b != nil
+}
+
+type subsumer struct {
+ ctx *adt.OpContext
+ errs errors.Error
+
+ Profile
+
+ inexact bool // If true, the result could be a false negative.
+ missing adt.Feature
+ gt adt.Value
+ lt adt.Value
+}
+
+func (s *subsumer) errf(msg string, args ...interface{}) {
+ b := s.ctx.NewErrf(msg, args...)
+ s.errs = errors.Append(s.errs, b.Err)
+}
+
+func unifyValue(c *adt.OpContext, a, b adt.Value) adt.Value {
+ v := &adt.Vertex{}
+ v.AddConjunct(adt.MakeRootConjunct(c.Env(0), a))
+ v.AddConjunct(adt.MakeRootConjunct(c.Env(0), b))
+ x, _ := c.Evaluate(c.Env(0), v)
+ return x
+}
+
+func (s *subsumer) getError() (err errors.Error) {
+ c := s.ctx
+ // src := binSrc(token.NoPos, opUnify, gt, lt)
+ if s.gt != nil && s.lt != nil {
+ // src := binSrc(token.NoPos, opUnify, s.gt, s.lt)
+ if s.missing != 0 {
+ s.errf("missing field %q", s.missing.SelectorString(c))
+ } else if b, ok := unifyValue(c, s.gt, s.lt).(*adt.Bottom); !ok {
+ s.errf("value not an instance")
+ } else {
+ s.errs = errors.Append(s.errs, b.Err)
+ }
+ }
+ if s.errs == nil {
+ s.errf("value not an instance")
+ }
+ err = s.errs
+ if s.inexact {
+ err = internal.DecorateError(internal.ErrInexact, err)
+ }
+ return err
+}
diff --git a/vendor/cuelang.org/go/internal/core/subsume/value.go b/vendor/cuelang.org/go/internal/core/subsume/value.go
new file mode 100644
index 0000000000..bea61af90e
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/subsume/value.go
@@ -0,0 +1,311 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package subsume
+
+import (
+ "bytes"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal/core/adt"
+)
+
+func (s *subsumer) values(a, b adt.Value) (result bool) {
+ defer func() {
+ if !result && s.gt == nil && s.lt == nil {
+ s.gt = a
+ s.lt = b
+ }
+ }()
+
+ if a == b {
+ return true
+ }
+
+ if s.Defaults {
+ b = adt.Default(b)
+ }
+
+ switch b := b.(type) {
+ case *adt.Bottom:
+ // If the value is incomplete, the error is not final. So either check
+ // structural equivalence or return an error.
+ return !b.IsIncomplete()
+
+ case *adt.Vertex:
+ if a, ok := a.(*adt.Vertex); ok {
+ return s.vertices(a, b)
+ }
+ if v, ok := b.BaseValue.(adt.Value); ok {
+ // Safe to ignore arcs of w.
+ return s.values(a, v)
+ }
+ // Check based on first value.
+
+ case *adt.Conjunction:
+ if _, ok := a.(*adt.Conjunction); ok {
+ break
+ }
+ for _, y := range b.Values {
+ if s.values(a, y) {
+ return true
+ }
+ }
+ return false
+
+ case *adt.Disjunction:
+ if _, ok := a.(*adt.Disjunction); ok {
+ break
+ }
+
+ for _, y := range b.Values {
+ if !s.values(a, y) {
+ return false
+ }
+ }
+ return true
+
+ case *adt.NodeLink:
+ // Do not descend into NodeLinks to avoid processing cycles.
+ // TODO: this would work better if all equal nodes shared the same
+ // node link.
+ return deref(a) == deref(b)
+ }
+
+ switch x := a.(type) {
+ case *adt.Top:
+ return true
+
+ case *adt.Bottom:
+ // isBottom(b) was already tested above.
+ return false
+
+ case *adt.BasicType:
+ k := b.Kind()
+ return x.K&k == k
+
+ case *adt.BoundValue:
+ return s.bound(x, b)
+
+ case *adt.Builtin:
+ return x == b
+
+ case *adt.BuiltinValidator:
+ if y := s.ctx.Validate(x, b); y != nil {
+ s.errs = errors.Append(s.errs, y.Err)
+ return false
+ }
+ return true
+
+ case *adt.Null:
+ return b.Kind() == adt.NullKind
+
+ case *adt.Bool:
+ y, ok := b.(*adt.Bool)
+ return ok && x.B == y.B
+
+ case *adt.Num:
+ y, ok := b.(*adt.Num)
+ return ok && x.K&y.K == y.K && test(s.ctx, x, adt.EqualOp, x, y)
+
+ case *adt.String:
+ y, ok := b.(*adt.String)
+ return ok && x.Str == y.Str
+
+ case *adt.Bytes:
+ y, ok := b.(*adt.Bytes)
+ return ok && bytes.Equal(x.B, y.B)
+
+ case *adt.Vertex:
+ y, ok := b.(*adt.Vertex)
+ if ok {
+ return s.vertices(x, y)
+ }
+
+ // TODO: Under what conditions can we cast to the value?
+ if v, _ := x.BaseValue.(adt.Value); v != nil {
+ return s.values(v, b)
+ }
+ return false
+
+ case *adt.Conjunction:
+ if y, ok := b.(*adt.Conjunction); ok {
+ // A Conjunction subsumes another Conjunction if for all values a in
+ // x there is a value b in y such that a subsumes b.
+ //
+ // This assumes overlapping ranges in disjunctions are merged.If
+ // this is not the case, subsumes will return a false negative,
+ // which is allowed.
+ outerC:
+ for _, a := range x.Values {
+ for _, b := range y.Values {
+ if s.values(a, b) {
+ continue outerC
+ }
+ }
+ // TODO: should this be marked as inexact?
+ return false
+ }
+ return true
+ }
+ subsumed := true
+ for _, a := range x.Values {
+ subsumed = subsumed && s.values(a, b)
+ }
+ return subsumed
+
+ case *adt.Disjunction:
+
+ if s.LeftDefault {
+ a = adt.Default(a)
+ var ok bool
+ x, ok = a.(*adt.Disjunction)
+ if !ok {
+ return s.values(a, b)
+ }
+ }
+
+ // A Disjunction subsumes another Disjunction if all values of y are
+ // subsumed by any of the values of x, and default values in y are
+ // subsumed by the default values of x.
+ //
+ // This assumes that overlapping ranges in x are merged. If this is not
+ // the case, subsumes will return a false negative, which is allowed.
+ if y, ok := b.(*adt.Disjunction); ok {
+ // at least one value in x should subsume each value in d.
+ outerD:
+ for i, b := range y.Values {
+ bDefault := i < y.NumDefaults
+ // v is subsumed if any value in x subsumes v.
+ for j, a := range x.Values {
+ aDefault := j < x.NumDefaults
+ if (aDefault || !bDefault) && s.values(a, b) {
+ continue outerD
+ }
+ }
+ return false
+ }
+ return true
+ }
+ // b is subsumed if any value in x subsumes b.
+ for _, a := range x.Values {
+ if s.values(a, b) {
+ return true
+ }
+ }
+ // TODO: should this be marked as inexact?
+ return false
+
+ case *adt.NodeLink:
+ return deref(x) == deref(b)
+ }
+ return false
+}
+
+func deref(v adt.Expr) *adt.Vertex {
+ switch x := v.(type) {
+ case *adt.Vertex:
+ return x
+ case *adt.NodeLink:
+ return x.Node
+ }
+ return nil
+}
+
+func (s *subsumer) bound(x *adt.BoundValue, v adt.Value) bool {
+ ctx := s.ctx
+ if isBottom(v) {
+ return true
+ }
+
+ switch y := v.(type) {
+ case *adt.BoundValue:
+ if !adt.IsConcrete(y.Value) {
+ return false
+ }
+
+ kx := x.Kind()
+ ky := y.Kind()
+ if (kx&ky)&^kx != 0 {
+ return false
+ }
+ // x subsumes y if
+ // x: >= a, y: >= b ==> a <= b
+ // x: >= a, y: > b ==> a <= b
+ // x: > a, y: > b ==> a <= b
+ // x: > a, y: >= b ==> a < b
+ //
+ // x: <= a, y: <= b ==> a >= b
+ //
+ // x: != a, y: != b ==> a != b
+ //
+ // false if types or op direction doesn't match
+
+ xv := x.Value
+ yv := y.Value
+ switch x.Op {
+ case adt.GreaterThanOp:
+ if y.Op == adt.GreaterEqualOp {
+ return test(ctx, x, adt.LessThanOp, xv, yv)
+ }
+ fallthrough
+ case adt.GreaterEqualOp:
+ if y.Op == adt.GreaterThanOp || y.Op == adt.GreaterEqualOp {
+ return test(ctx, x, adt.LessEqualOp, xv, yv)
+ }
+ case adt.LessThanOp:
+ if y.Op == adt.LessEqualOp {
+ return test(ctx, x, adt.GreaterThanOp, xv, yv)
+ }
+ fallthrough
+ case adt.LessEqualOp:
+ if y.Op == adt.LessThanOp || y.Op == adt.LessEqualOp {
+ return test(ctx, x, adt.GreaterEqualOp, xv, yv)
+ }
+ case adt.NotEqualOp:
+ switch y.Op {
+ case adt.NotEqualOp:
+ return test(ctx, x, adt.EqualOp, xv, yv)
+ case adt.GreaterEqualOp:
+ return test(ctx, x, adt.LessThanOp, xv, yv)
+ case adt.GreaterThanOp:
+ return test(ctx, x, adt.LessEqualOp, xv, yv)
+ case adt.LessThanOp:
+ return test(ctx, x, adt.GreaterEqualOp, xv, yv)
+ case adt.LessEqualOp:
+ return test(ctx, x, adt.GreaterThanOp, xv, yv)
+ }
+
+ case adt.MatchOp, adt.NotMatchOp:
+ // these are just approximations
+ if y.Op == x.Op {
+ return test(ctx, x, adt.EqualOp, xv, yv)
+ }
+
+ default:
+ // adt.NotEqualOp already handled above.
+ panic("cue: undefined bound mode")
+ }
+
+ case *adt.Num, *adt.String, *adt.Bool:
+ return test(ctx, x, x.Op, y, x.Value)
+ }
+ return false
+}
+
+func test(ctx *adt.OpContext, src adt.Node, op adt.Op, gt, lt adt.Value) bool {
+ x := adt.BinOp(ctx, op, gt, lt)
+ b, ok := x.(*adt.Bool)
+ return ok && b.B
+}
diff --git a/vendor/cuelang.org/go/internal/core/subsume/vertex.go b/vendor/cuelang.org/go/internal/core/subsume/vertex.go
new file mode 100644
index 0000000000..55f83dcff9
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/subsume/vertex.go
@@ -0,0 +1,260 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package subsume
+
+import (
+ "fmt"
+
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/export"
+)
+
+// Notes:
+// - Can optional fields of y can always be ignored here? Maybe not in the
+// schema case.
+// - Definitions of y can be ignored in data mode.
+//
+// TODO(perf): use merge sort where possible.
+func (s *subsumer) vertices(x, y *adt.Vertex) bool {
+ if x == y {
+ return true
+ }
+
+ if s.Defaults {
+ y = y.Default()
+ }
+
+ if b, _ := y.BaseValue.(*adt.Bottom); b != nil {
+ // If the value is incomplete, the error is not final. So either check
+ // structural equivalence or return an error.
+ return !b.IsIncomplete()
+ }
+
+ ctx := s.ctx
+
+ final := y.IsData() || s.Final
+
+ switch v := x.BaseValue.(type) {
+ case *adt.Bottom:
+ return false
+
+ case *adt.ListMarker:
+ if !y.IsList() {
+ s.errf("list does not subsume %s (type %s)", y, y.Kind())
+ return false
+ }
+ if !s.listVertices(x, y) {
+ return false
+ }
+ // TODO: allow other arcs alongside list arc.
+ return true
+
+ case *adt.StructMarker:
+ _, ok := y.BaseValue.(*adt.StructMarker)
+ if !ok {
+ return false
+ }
+
+ case adt.Value:
+ if !s.values(v, y.Value()) {
+ return false
+ }
+
+ // Embedded scalars could still have arcs.
+ if final {
+ return true
+ }
+
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+
+ xClosed := x.IsClosedStruct() && !s.IgnoreClosedness
+ // TODO: this should not close for taking defaults. Do a more principled
+ // makeover of this package before making it public, though.
+ yClosed := s.Final || s.Defaults ||
+ (y.IsClosedStruct() && !s.IgnoreClosedness)
+
+ if xClosed && !yClosed && !final {
+ return false
+ }
+
+ types := x.OptionalTypes()
+ if !final && !s.IgnoreOptional && types&(adt.HasPattern|adt.HasAdditional) != 0 {
+ // TODO: there are many cases where pattern constraints can be checked.
+ s.inexact = true
+ return false
+ }
+
+ // All arcs in x must exist in y and its values must subsume.
+ xFeatures := export.VertexFeatures(s.ctx, x)
+ for _, f := range xFeatures {
+ if s.Final && !f.IsRegular() {
+ continue
+ }
+
+ a := x.Lookup(f)
+ aOpt := false
+ if a == nil {
+ // x.f is optional
+ if s.IgnoreOptional {
+ continue
+ }
+
+ a = &adt.Vertex{Label: f}
+ x.MatchAndInsert(ctx, a)
+ a.Finalize(ctx)
+
+ // If field a is optional and has value top, neither the
+ // omission of the field nor the field defined with any value
+ // may cause unification to fail.
+ if a.Kind() == adt.TopKind {
+ continue
+ }
+
+ aOpt = true
+ }
+
+ b := y.Lookup(f)
+ if b == nil {
+ // y.f is optional
+ if !aOpt {
+ s.errf("required field is optional in subsumed value: %s", f)
+ return false
+ }
+
+ // If f is undefined for y and if y is closed, the field is
+ // implicitly defined as _|_ and thus subsumed. Technically, this is
+ // even true if a is not optional, but in that case it means that y
+ // is invalid, so return false regardless
+ if !y.Accept(ctx, f) || y.IsData() || s.Final {
+ continue
+ }
+
+ b = &adt.Vertex{Label: f}
+ y.MatchAndInsert(ctx, b)
+ b.Finalize(ctx)
+ }
+
+ if s.values(a, b) {
+ continue
+ }
+
+ s.missing = f
+ s.gt = a
+ s.lt = y
+
+ s.errf("field %s not present in %s", f, y)
+ return false
+ }
+
+ if xClosed && !yClosed && !s.Final {
+ s.errf("closed struct does not subsume open struct")
+ return false
+ }
+
+ yFeatures := export.VertexFeatures(s.ctx, y)
+outer:
+ for _, f := range yFeatures {
+ if s.Final && !f.IsRegular() {
+ continue
+ }
+
+ for _, g := range xFeatures {
+ if g == f {
+ // already validated
+ continue outer
+ }
+ }
+
+ b := y.Lookup(f)
+ if b == nil {
+ if s.IgnoreOptional || s.Final {
+ continue
+ }
+
+ b = &adt.Vertex{Label: f}
+ y.MatchAndInsert(ctx, b)
+ }
+
+ if !x.Accept(ctx, f) {
+ if s.Profile.IgnoreClosedness {
+ continue
+ }
+ s.errf("field not allowed in closed struct: %s", f)
+ return false
+ }
+
+ a := &adt.Vertex{Label: f}
+ x.MatchAndInsert(ctx, a)
+ if len(a.Conjuncts) == 0 {
+ // It is accepted and has no further constraints, so all good.
+ continue
+ }
+
+ a.Finalize(ctx)
+ b.Finalize(ctx)
+
+ if !s.vertices(a, b) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (s *subsumer) listVertices(x, y *adt.Vertex) bool {
+ ctx := s.ctx
+
+ if !y.IsData() && x.IsClosedList() && !y.IsClosedList() {
+ return false
+ }
+
+ xElems := x.Elems()
+ yElems := y.Elems()
+
+ switch {
+ case len(xElems) == len(yElems):
+ case len(xElems) > len(yElems):
+ return false
+ case x.IsClosedList():
+ return false
+ default:
+ a := &adt.Vertex{Label: adt.AnyIndex}
+ x.MatchAndInsert(ctx, a)
+ a.Finalize(ctx)
+
+ // x must be open
+ for _, b := range yElems[len(xElems):] {
+ if !s.vertices(a, b) {
+ return false
+ }
+ }
+
+ if !y.IsClosedList() {
+ b := &adt.Vertex{Label: adt.AnyIndex}
+ y.MatchAndInsert(ctx, b)
+ b.Finalize(ctx)
+ }
+ }
+
+ for i, a := range xElems {
+ if !s.vertices(a, yElems[i]) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/cuelang.org/go/internal/core/validate/validate.go b/vendor/cuelang.org/go/internal/core/validate/validate.go
new file mode 100644
index 0000000000..132c216dfa
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/validate/validate.go
@@ -0,0 +1,112 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package validate collects errors from an evaluated Vertex.
+package validate
+
+import (
+ "cuelang.org/go/internal/core/adt"
+)
+
+type Config struct {
+ // Concrete, if true, requires that all values be concrete.
+ Concrete bool
+
+ // DisallowCycles indicates that there may not be cycles.
+ DisallowCycles bool
+
+ // AllErrors continues descending into a Vertex, even if errors are found.
+ AllErrors bool
+
+ // TODO: omitOptional, if this is becomes relevant.
+}
+
+// Validate checks that a value has certain properties. The value must have
+// been evaluated.
+func Validate(ctx *adt.OpContext, v *adt.Vertex, cfg *Config) *adt.Bottom {
+ if cfg == nil {
+ cfg = &Config{}
+ }
+ x := validator{Config: *cfg, ctx: ctx}
+ x.validate(v)
+ return x.err
+}
+
+type validator struct {
+ Config
+ ctx *adt.OpContext
+ err *adt.Bottom
+ inDefinition int
+}
+
+func (v *validator) checkConcrete() bool {
+ return v.Concrete && v.inDefinition == 0
+}
+
+func (v *validator) add(b *adt.Bottom) {
+ if !v.AllErrors {
+ v.err = adt.CombineErrors(nil, v.err, b)
+ return
+ }
+ if !b.ChildError {
+ v.err = adt.CombineErrors(nil, v.err, b)
+ }
+}
+
+func (v *validator) validate(x *adt.Vertex) {
+ defer v.ctx.PopArc(v.ctx.PushArc(x))
+
+ if b, _ := x.BaseValue.(*adt.Bottom); b != nil {
+ switch b.Code {
+ case adt.CycleError:
+ if v.checkConcrete() || v.DisallowCycles {
+ v.add(b)
+ }
+
+ case adt.IncompleteError:
+ if v.checkConcrete() {
+ v.add(b)
+ }
+
+ default:
+ v.add(b)
+ }
+ if !b.HasRecursive {
+ return
+ }
+
+ } else if v.checkConcrete() {
+ x = x.Default()
+ if !adt.IsConcrete(x) {
+ x := x.Value()
+ v.add(&adt.Bottom{
+ Code: adt.IncompleteError,
+ Err: v.ctx.Newf("incomplete value %v", x),
+ })
+ }
+ }
+
+ for _, a := range x.Arcs {
+ if !v.AllErrors && v.err != nil {
+ break
+ }
+ if a.Label.IsRegular() {
+ v.validate(a)
+ } else {
+ v.inDefinition++
+ v.validate(a)
+ v.inDefinition--
+ }
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/walk/walk.go b/vendor/cuelang.org/go/internal/core/walk/walk.go
new file mode 100644
index 0000000000..3a12139f97
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/walk/walk.go
@@ -0,0 +1,191 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// walk provides functions for visiting the nodes of an ADT tree.
+package walk
+
+import (
+ "fmt"
+
+ "cuelang.org/go/internal/core/adt"
+)
+
+// Features calls f for all features used in x and indicates whether the
+// feature is used as a reference or not.
+func Features(x adt.Expr, f func(label adt.Feature, src adt.Node)) {
+ w := Visitor{
+ Feature: f,
+ }
+ w.Elem(x)
+}
+
+type Visitor struct {
+ // TODO: lets really should be special fields
+ letDone map[adt.Expr]bool
+
+ Feature func(f adt.Feature, src adt.Node)
+ Before func(adt.Node) bool
+}
+
+func (w *Visitor) init() {
+ if w.letDone == nil {
+ w.letDone = map[adt.Expr]bool{}
+ }
+}
+
+func (w *Visitor) Elem(x adt.Elem) {
+ w.init()
+ w.node(x)
+}
+
+func (w *Visitor) feature(x adt.Feature, src adt.Node) {
+ if w.Feature != nil {
+ w.Feature(x, src)
+ }
+}
+
+func (w *Visitor) node(n adt.Node) {
+ if w.Before != nil && !w.Before(n) {
+ return
+ }
+
+ switch x := n.(type) {
+ case nil:
+
+ // TODO: special-case Vertex?
+ case adt.Value:
+
+ case *adt.ListLit:
+ for _, x := range x.Elems {
+ w.node(x)
+ }
+
+ case *adt.StructLit:
+ for _, x := range x.Decls {
+ w.node(x)
+ }
+
+ case *adt.FieldReference:
+ w.feature(x.Label, x)
+
+ case *adt.ValueReference:
+ w.feature(x.Label, x)
+
+ case *adt.LabelReference:
+
+ case *adt.DynamicReference:
+
+ case *adt.ImportReference:
+ w.feature(x.ImportPath, x)
+ w.feature(x.Label, x)
+
+ case *adt.LetReference:
+ w.feature(x.Label, x)
+ if w.letDone == nil {
+ w.letDone = map[adt.Expr]bool{}
+ }
+ if !w.letDone[x.X] {
+ w.letDone[x.X] = true
+ w.node(x.X)
+ }
+
+ case *adt.SelectorExpr:
+ w.node(x.X)
+ w.feature(x.Sel, x)
+
+ case *adt.IndexExpr:
+ w.node(x.X)
+ w.node(x.Index)
+
+ case *adt.SliceExpr:
+ w.node(x.X)
+ w.node(x.Lo)
+ w.node(x.Hi)
+ w.node(x.Stride)
+
+ case *adt.Interpolation:
+ for _, x := range x.Parts {
+ w.node(x)
+ }
+
+ case *adt.BoundExpr:
+ w.node(x.Expr)
+
+ case *adt.UnaryExpr:
+ w.node(x.X)
+
+ case *adt.BinaryExpr:
+ w.node(x.X)
+ w.node(x.Y)
+
+ case *adt.CallExpr:
+ w.node(x.Fun)
+ for _, arg := range x.Args {
+ w.node(arg)
+ }
+
+ case *adt.DisjunctionExpr:
+ for _, d := range x.Values {
+ w.node(d.Val)
+ }
+
+ // Fields
+
+ case *adt.Ellipsis:
+ if x.Value != nil {
+ w.node(x.Value)
+ }
+
+ case *adt.Field:
+ w.feature(x.Label, x)
+ w.node(x.Value)
+
+ case *adt.OptionalField:
+ w.feature(x.Label, x)
+ w.node(x.Value)
+
+ case *adt.BulkOptionalField:
+ w.node(x.Filter)
+ w.node(x.Value)
+
+ case *adt.DynamicField:
+ w.node(x.Key)
+ w.node(x.Value)
+
+ // Yielders
+
+ case *adt.Comprehension:
+ w.node(x.Clauses)
+ w.node(x.Value)
+
+ case *adt.ForClause:
+ w.feature(x.Key, x)
+ w.feature(x.Value, x)
+ w.node(x.Dst)
+
+ case *adt.IfClause:
+ w.node(x.Condition)
+ w.node(x.Dst)
+
+ case *adt.LetClause:
+ w.feature(x.Label, x)
+ w.node(x.Expr)
+ w.node(x.Dst)
+
+ case *adt.ValueClause:
+
+ default:
+ panic(fmt.Sprintf("unknown field %T", x))
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go
new file mode 100644
index 0000000000..7eaf55c8cb
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go
@@ -0,0 +1,379 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "math/big"
+ "regexp"
+ "strings"
+
+ "gopkg.in/yaml.v3"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/astinternal"
+)
+
+// Encode converts a CUE AST to YAML.
+//
+// The given file must only contain values that can be directly supported by
+// YAML:
+// Type Restrictions
+// BasicLit
+// File no imports, aliases, or definitions
+// StructLit no embeddings, aliases, or definitions
+// List
+// Field must be regular; label must be a BasicLit or Ident
+// CommentGroup
+//
+// TODO: support anchors through Ident.
+func Encode(n ast.Node) (b []byte, err error) {
+ y, err := encode(n)
+ if err != nil {
+ return nil, err
+ }
+ w := &bytes.Buffer{}
+ enc := yaml.NewEncoder(w)
+ // Use idiomatic indentation.
+ enc.SetIndent(2)
+ if err = enc.Encode(y); err != nil {
+ return nil, err
+ }
+ return w.Bytes(), nil
+}
+
+func encode(n ast.Node) (y *yaml.Node, err error) {
+ switch x := n.(type) {
+ case *ast.BasicLit:
+ y, err = encodeScalar(x)
+
+ case *ast.ListLit:
+ y, err = encodeExprs(x.Elts)
+ line := x.Lbrack.Line()
+ if err == nil && line > 0 && line == x.Rbrack.Line() {
+ y.Style = yaml.FlowStyle
+ }
+
+ case *ast.StructLit:
+ y, err = encodeDecls(x.Elts)
+ line := x.Lbrace.Line()
+ if err == nil && line > 0 && line == x.Rbrace.Line() {
+ y.Style = yaml.FlowStyle
+ }
+
+ case *ast.File:
+ y, err = encodeDecls(x.Decls)
+
+ case *ast.UnaryExpr:
+ b, ok := x.X.(*ast.BasicLit)
+ if ok && x.Op == token.SUB && (b.Kind == token.INT || b.Kind == token.FLOAT) {
+ y, err = encodeScalar(b)
+ if !strings.HasPrefix(y.Value, "-") {
+ y.Value = "-" + y.Value
+ break
+ }
+ }
+ return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", astinternal.DebugStr(x), x)
+ default:
+ return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", astinternal.DebugStr(x), x)
+ }
+ if err != nil {
+ return nil, err
+ }
+ addDocs(n, y, y)
+ return y, nil
+}
+
+func encodeScalar(b *ast.BasicLit) (n *yaml.Node, err error) {
+ n = &yaml.Node{Kind: yaml.ScalarNode}
+
+ // TODO: use cue.Value and support attributes for setting YAML tags.
+
+ switch b.Kind {
+ case token.INT:
+ var x big.Int
+ if err := setNum(n, b.Value, &x); err != nil {
+ return nil, err
+ }
+
+ case token.FLOAT:
+ var x big.Float
+ if err := setNum(n, b.Value, &x); err != nil {
+ return nil, err
+ }
+
+ case token.TRUE, token.FALSE, token.NULL:
+ n.Value = b.Value
+
+ case token.STRING:
+ info, nStart, _, err := literal.ParseQuotes(b.Value, b.Value)
+ if err != nil {
+ return nil, err
+ }
+ str, err := info.Unquote(b.Value[nStart:])
+ if err != nil {
+ panic(fmt.Sprintf("invalid string: %v", err))
+ }
+ n.SetString(str)
+
+ switch {
+ case !info.IsDouble():
+ n.Tag = "!!binary"
+ n.Value = base64.StdEncoding.EncodeToString([]byte(str))
+
+ case info.IsMulti():
+ // Preserve multi-line format.
+ n.Style = yaml.LiteralStyle
+
+ default:
+ if shouldQuote(str) {
+ n.Style = yaml.DoubleQuotedStyle
+ }
+ }
+
+ default:
+ return nil, errors.Newf(b.Pos(), "unknown literal type %v", b.Kind)
+ }
+ return n, nil
+}
+
+// shouldQuote indicates that a string may be a YAML 1.1. legacy value and that
+// the string should be quoted.
+func shouldQuote(str string) bool {
+ return legacyStrings[str] || useQuote.MatchString(str)
+}
+
+// This regular expression conservatively matches any date, time string,
+// or base60 float.
+var useQuote = regexp.MustCompile(`^[\-+0-9:\. \t]+([-:]|[tT])[\-+0-9:\. \t]+[zZ]?$`)
+
+// legacyStrings contains a map of fixed strings with special meaning for any
+// type in the YAML Tag registry (https://yaml.org/type/index.html) as used
+// in YAML 1.1.
+//
+// These strings are always quoted upon export to allow for backward
+// compatibility with YAML 1.1 parsers.
+var legacyStrings = map[string]bool{
+ "y": true,
+ "Y": true,
+ "yes": true,
+ "Yes": true,
+ "YES": true,
+ "n": true,
+ "N": true,
+ "t": true,
+ "T": true,
+ "f": true,
+ "F": true,
+ "no": true,
+ "No": true,
+ "NO": true,
+ "true": true,
+ "True": true,
+ "TRUE": true,
+ "false": true,
+ "False": true,
+ "FALSE": true,
+ "on": true,
+ "On": true,
+ "ON": true,
+ "off": true,
+ "Off": true,
+ "OFF": true,
+
+ // Non-standard.
+ ".Nan": true,
+}
+
+func setNum(n *yaml.Node, s string, x interface{}) error {
+ if yaml.Unmarshal([]byte(s), x) == nil {
+ n.Value = s
+ return nil
+ }
+
+ var ni literal.NumInfo
+ if err := literal.ParseNum(s, &ni); err != nil {
+ return err
+ }
+ n.Value = ni.String()
+ return nil
+}
+
+func encodeExprs(exprs []ast.Expr) (n *yaml.Node, err error) {
+ n = &yaml.Node{Kind: yaml.SequenceNode}
+
+ for _, elem := range exprs {
+ e, err := encode(elem)
+ if err != nil {
+ return nil, err
+ }
+ n.Content = append(n.Content, e)
+ }
+ return n, nil
+}
+
+// encodeDecls converts a sequence of declarations to a value. If it encounters
+// an embedded value, it will return this expression. This is more relaxed for
+// structs than is currently allowed for CUE, but the expectation is that this
+// will be allowed at some point. The input would still be illegal CUE.
+func encodeDecls(decls []ast.Decl) (n *yaml.Node, err error) {
+ n = &yaml.Node{Kind: yaml.MappingNode}
+
+ docForNext := strings.Builder{}
+ var lastHead, lastFoot *yaml.Node
+ hasEmbed := false
+ for _, d := range decls {
+ switch x := d.(type) {
+ default:
+ return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", astinternal.DebugStr(x), x)
+
+ case *ast.Package:
+ if len(n.Content) > 0 {
+ return nil, errors.Newf(x.Pos(), "invalid package clause")
+ }
+ continue
+
+ case *ast.CommentGroup:
+ docForNext.WriteString(docToYAML(x))
+ docForNext.WriteString("\n\n")
+ continue
+
+ case *ast.Attribute:
+ continue
+
+ case *ast.Field:
+ if x.Token == token.ISA {
+ return nil, errors.Newf(x.TokenPos, "yaml: definition not allowed")
+ }
+ if x.Optional != token.NoPos {
+ return nil, errors.Newf(x.Optional, "yaml: optional fields not allowed")
+ }
+ if hasEmbed {
+ return nil, errors.Newf(x.TokenPos, "yaml: embedding mixed with fields")
+ }
+ name, _, err := ast.LabelName(x.Label)
+ if err != nil {
+ return nil, errors.Newf(x.Label.Pos(), "yaml: only literal labels allowed")
+ }
+
+ label := &yaml.Node{}
+ addDocs(x.Label, label, label)
+ label.SetString(name)
+ if shouldQuote(name) {
+ label.Style = yaml.DoubleQuotedStyle
+ }
+
+ value, err := encode(x.Value)
+ if err != nil {
+ return nil, err
+ }
+ lastHead = label
+ lastFoot = value
+ addDocs(x, label, value)
+ n.Content = append(n.Content, label)
+ n.Content = append(n.Content, value)
+
+ case *ast.EmbedDecl:
+ if hasEmbed {
+ return nil, errors.Newf(x.Pos(), "yaml: multiple embedded values")
+ }
+ hasEmbed = true
+ e, err := encode(x.Expr)
+ if err != nil {
+ return nil, err
+ }
+ addDocs(x, e, e)
+ lastHead = e
+ lastFoot = e
+ n.Content = append(n.Content, e)
+ }
+ if docForNext.Len() > 0 {
+ docForNext.WriteString(lastHead.HeadComment)
+ lastHead.HeadComment = docForNext.String()
+ docForNext.Reset()
+ }
+ }
+
+ if docForNext.Len() > 0 && lastFoot != nil {
+ if !strings.HasSuffix(lastFoot.FootComment, "\n") {
+ lastFoot.FootComment += "\n"
+ }
+ n := docForNext.Len()
+ lastFoot.FootComment += docForNext.String()[:n-1]
+ }
+
+ if hasEmbed {
+ return n.Content[0], nil
+ }
+
+ return n, nil
+}
+
+// addDocs prefixes head, replaces line and appends foot comments.
+func addDocs(n ast.Node, h, f *yaml.Node) {
+ head := ""
+ isDoc := false
+ for _, c := range ast.Comments(n) {
+ switch {
+ case c.Line:
+ f.LineComment = docToYAML(c)
+
+ case c.Position > 0:
+ if f.FootComment != "" {
+ f.FootComment += "\n\n"
+ } else if relPos := c.Pos().RelPos(); relPos == token.NewSection {
+ f.FootComment += "\n"
+ }
+ f.FootComment += docToYAML(c)
+
+ default:
+ if head != "" {
+ head += "\n\n"
+ }
+ head += docToYAML(c)
+ isDoc = isDoc || c.Doc
+ }
+ }
+
+ if head != "" {
+ if h.HeadComment != "" || !isDoc {
+ head += "\n\n"
+ }
+ h.HeadComment = head + h.HeadComment
+ }
+}
+
+// docToYAML converts a CUE CommentGroup to a YAML comment string. This ensures
+// that comments with empty lines get properly converted.
+func docToYAML(c *ast.CommentGroup) string {
+ s := c.Text()
+ if strings.HasSuffix(s, "\n") { // always true
+ s = s[:len(s)-1]
+ }
+ lines := strings.Split(s, "\n")
+ for i, l := range lines {
+ if l == "" {
+ lines[i] = "#"
+ } else {
+ lines[i] = "# " + l
+ }
+ }
+ return strings.Join(lines, "\n")
+}
diff --git a/vendor/cuelang.org/go/internal/internal.go b/vendor/cuelang.org/go/internal/internal.go
new file mode 100644
index 0000000000..1dfc178f0d
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/internal.go
@@ -0,0 +1,422 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal exposes some cue internals to other packages.
+//
+// A better name for this package would be technicaldebt.
+package internal // import "cuelang.org/go/internal"
+
+// TODO: refactor packages as to make this package unnecessary.
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/cockroachdb/apd/v2"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/ast/astutil"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// A Decimal is an arbitrary-precision binary-coded decimal number.
+//
+// Right now Decimal is aliased to apd.Decimal. This may change in the future.
+type Decimal = apd.Decimal
+
+// ErrIncomplete can be used by builtins to signal the evaluation was
+// incomplete.
+var ErrIncomplete = errors.New("incomplete value")
+
+// MakeInstance makes a new instance from a value.
+var MakeInstance func(value interface{}) (instance interface{})
+
+// BaseContext is used as CUEs default context for arbitrary-precision decimals
+var BaseContext = apd.BaseContext.WithPrecision(24)
+
+// ListEllipsis reports the list type and remaining elements of a list. If we
+// ever relax the usage of ellipsis, this function will likely change. Using
+// this function will ensure keeping correct behavior or causing a compiler
+// failure.
+func ListEllipsis(n *ast.ListLit) (elts []ast.Expr, e *ast.Ellipsis) {
+ elts = n.Elts
+ if n := len(elts); n > 0 {
+ var ok bool
+ if e, ok = elts[n-1].(*ast.Ellipsis); ok {
+ elts = elts[:n-1]
+ }
+ }
+ return elts, e
+}
+
+type PkgInfo struct {
+ Package *ast.Package
+ Index int // position in File.Decls
+ Name string
+}
+
+// IsAnonymous reports whether the package is anonymous.
+func (p *PkgInfo) IsAnonymous() bool {
+ return p.Name == "" || p.Name == "_"
+}
+
+func GetPackageInfo(f *ast.File) PkgInfo {
+ for i, d := range f.Decls {
+ switch x := d.(type) {
+ case *ast.CommentGroup:
+ case *ast.Attribute:
+ case *ast.Package:
+ if x.Name == nil {
+ break
+ }
+ return PkgInfo{x, i, x.Name.Name}
+ }
+ }
+ return PkgInfo{}
+}
+
+// Deprecated: use GetPackageInfo
+func PackageInfo(f *ast.File) (p *ast.Package, name string, tok token.Pos) {
+ x := GetPackageInfo(f)
+ if p := x.Package; p != nil {
+ return p, x.Name, p.Name.Pos()
+ }
+ return nil, "", f.Pos()
+}
+
+func SetPackage(f *ast.File, name string, overwrite bool) {
+ p, str, _ := PackageInfo(f)
+ if p != nil {
+ if !overwrite || str == name {
+ return
+ }
+ ident := ast.NewIdent(name)
+ astutil.CopyMeta(ident, p.Name)
+ return
+ }
+
+ decls := make([]ast.Decl, len(f.Decls)+1)
+ k := 0
+ for _, d := range f.Decls {
+ if _, ok := d.(*ast.CommentGroup); ok {
+ decls[k] = d
+ k++
+ continue
+ }
+ break
+ }
+ decls[k] = &ast.Package{Name: ast.NewIdent(name)}
+ copy(decls[k+1:], f.Decls[k:])
+ f.Decls = decls
+}
+
+// NewComment creates a new CommentGroup from the given text.
+// Each line is prefixed with "//" and the last newline is removed.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewComment(isDoc bool, s string) *ast.CommentGroup {
+ if s == "" {
+ return nil
+ }
+ cg := &ast.CommentGroup{Doc: isDoc}
+ if !isDoc {
+ cg.Line = true
+ cg.Position = 10
+ }
+ scanner := bufio.NewScanner(strings.NewReader(s))
+ for scanner.Scan() {
+ scanner := bufio.NewScanner(strings.NewReader(scanner.Text()))
+ scanner.Split(bufio.ScanWords)
+ const maxRunesPerLine = 66
+ count := 2
+ buf := strings.Builder{}
+ buf.WriteString("//")
+ for scanner.Scan() {
+ s := scanner.Text()
+ n := len([]rune(s)) + 1
+ if count+n > maxRunesPerLine && count > 3 {
+ cg.List = append(cg.List, &ast.Comment{Text: buf.String()})
+ count = 3
+ buf.Reset()
+ buf.WriteString("//")
+ }
+ buf.WriteString(" ")
+ buf.WriteString(s)
+ count += n
+ }
+ cg.List = append(cg.List, &ast.Comment{Text: buf.String()})
+ }
+ if last := len(cg.List) - 1; cg.List[last].Text == "//" {
+ cg.List = cg.List[:last]
+ }
+ return cg
+}
+
+func FileComment(f *ast.File) *ast.CommentGroup {
+ pkg, _, _ := PackageInfo(f)
+ var cgs []*ast.CommentGroup
+ if pkg != nil {
+ cgs = pkg.Comments()
+ } else if cgs = f.Comments(); len(cgs) > 0 {
+ // Use file comment.
+ } else {
+ // Use first comment before any declaration.
+ for _, d := range f.Decls {
+ if cg, ok := d.(*ast.CommentGroup); ok {
+ return cg
+ }
+ if cgs = ast.Comments(d); cgs != nil {
+ break
+ }
+ // TODO: what to do here?
+ if _, ok := d.(*ast.Attribute); !ok {
+ break
+ }
+ }
+ }
+ var cg *ast.CommentGroup
+ for _, c := range cgs {
+ if c.Position == 0 {
+ cg = c
+ }
+ }
+ return cg
+}
+
+func NewAttr(name, str string) *ast.Attribute {
+ buf := &strings.Builder{}
+ buf.WriteByte('@')
+ buf.WriteString(name)
+ buf.WriteByte('(')
+ fmt.Fprintf(buf, str)
+ buf.WriteByte(')')
+
+ return &ast.Attribute{Text: buf.String()}
+}
+
+// ToExpr converts a node to an expression. If it is a file, it will return
+// it as a struct. If is an expression, it will return it as is. Otherwise
+// it panics.
+func ToExpr(n ast.Node) ast.Expr {
+ switch x := n.(type) {
+ case nil:
+ return nil
+
+ case ast.Expr:
+ return x
+
+ case *ast.File:
+ start := 0
+ outer:
+ for i, d := range x.Decls {
+ switch d.(type) {
+ case *ast.Package, *ast.ImportDecl:
+ start = i + 1
+ case *ast.CommentGroup, *ast.Attribute:
+ default:
+ break outer
+ }
+ }
+ decls := x.Decls[start:]
+ if len(decls) == 1 {
+ if e, ok := decls[0].(*ast.EmbedDecl); ok {
+ return e.Expr
+ }
+ }
+ return &ast.StructLit{Elts: decls}
+
+ default:
+ panic(fmt.Sprintf("Unsupported node type %T", x))
+ }
+}
+
+// ToFile converts an expression to a file.
+//
+// Adjusts the spacing of x when needed.
+func ToFile(n ast.Node) *ast.File {
+ switch x := n.(type) {
+ case nil:
+ return nil
+ case *ast.StructLit:
+ return &ast.File{Decls: x.Elts}
+ case ast.Expr:
+ ast.SetRelPos(x, token.NoSpace)
+ return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: x}}}
+ case *ast.File:
+ return x
+ default:
+ panic(fmt.Sprintf("Unsupported node type %T", x))
+ }
+}
+
+// ToStruct gets the non-preamble declarations of a file and puts them in a
+// struct.
+func ToStruct(f *ast.File) *ast.StructLit {
+ start := 0
+ for i, d := range f.Decls {
+ switch d.(type) {
+ case *ast.Package, *ast.ImportDecl:
+ start = i + 1
+ case *ast.Attribute, *ast.CommentGroup:
+ default:
+ break
+ }
+ }
+ s := ast.NewStruct()
+ s.Elts = f.Decls[start:]
+ return s
+}
+
+func IsBulkField(d ast.Decl) bool {
+ if f, ok := d.(*ast.Field); ok {
+ if _, ok := f.Label.(*ast.ListLit); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func IsDef(s string) bool {
+ return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_#")
+}
+
+func IsHidden(s string) bool {
+ return strings.HasPrefix(s, "_")
+}
+
+func IsDefOrHidden(s string) bool {
+ return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_")
+}
+
+func IsDefinition(label ast.Label) bool {
+ switch x := label.(type) {
+ case *ast.Alias:
+ if ident, ok := x.Expr.(*ast.Ident); ok {
+ return IsDef(ident.Name)
+ }
+ case *ast.Ident:
+ return IsDef(x.Name)
+ }
+ return false
+}
+
+func IsRegularField(f *ast.Field) bool {
+ if f.Token == token.ISA {
+ return false
+ }
+ var ident *ast.Ident
+ switch x := f.Label.(type) {
+ case *ast.Alias:
+ ident, _ = x.Expr.(*ast.Ident)
+ case *ast.Ident:
+ ident = x
+ }
+ if ident == nil {
+ return true
+ }
+ if strings.HasPrefix(ident.Name, "#") || strings.HasPrefix(ident.Name, "_") {
+ return false
+ }
+ return true
+}
+
+func EmbedStruct(s *ast.StructLit) *ast.EmbedDecl {
+ e := &ast.EmbedDecl{Expr: s}
+ if len(s.Elts) == 1 {
+ d := s.Elts[0]
+ astutil.CopyPosition(e, d)
+ ast.SetRelPos(d, token.NoSpace)
+ astutil.CopyComments(e, d)
+ ast.SetComments(d, nil)
+ if f, ok := d.(*ast.Field); ok {
+ ast.SetRelPos(f.Label, token.NoSpace)
+ }
+ }
+ s.Lbrace = token.Newline.Pos()
+ s.Rbrace = token.NoSpace.Pos()
+ return e
+}
+
+// IsEllipsis reports whether the declaration can be represented as an ellipsis.
+func IsEllipsis(x ast.Decl) bool {
+ // ...
+ if _, ok := x.(*ast.Ellipsis); ok {
+ return true
+ }
+
+ // [string]: _ or [_]: _
+ f, ok := x.(*ast.Field)
+ if !ok {
+ return false
+ }
+ v, ok := f.Value.(*ast.Ident)
+ if !ok || v.Name != "_" {
+ return false
+ }
+ l, ok := f.Label.(*ast.ListLit)
+ if !ok || len(l.Elts) != 1 {
+ return false
+ }
+ i, ok := l.Elts[0].(*ast.Ident)
+ if !ok {
+ return false
+ }
+ return i.Name == "string" || i.Name == "_"
+}
+
+// GenPath reports the directory in which to store generated files.
+func GenPath(root string) string {
+ info, err := os.Stat(filepath.Join(root, "cue.mod"))
+ if os.IsNotExist(err) || !info.IsDir() {
+ // Try legacy pkgDir mode
+ pkgDir := filepath.Join(root, "pkg")
+ if err == nil && !info.IsDir() {
+ return pkgDir
+ }
+ if info, err := os.Stat(pkgDir); err == nil && info.IsDir() {
+ return pkgDir
+ }
+ }
+ return filepath.Join(root, "cue.mod", "gen")
+}
+
+var ErrInexact = errors.New("inexact subsumption")
+
+func DecorateError(info error, err errors.Error) errors.Error {
+ return &decorated{cueError: err, info: info}
+}
+
+type cueError = errors.Error
+
+type decorated struct {
+ cueError
+
+ info error
+}
+
+func (e *decorated) Is(err error) bool {
+ return errors.Is(e.info, err) || errors.Is(e.cueError, err)
+}
+
+// MaxDepth indicates the maximum evaluation depth. This is there to break
+// cycles in the absence of cycle detection.
+//
+// It is registered in a central place to make it easy to find all spots where
+// cycles are broken in this brute-force manner.
+//
+// TODO(eval): have cycle detection.
+const MaxDepth = 20
diff --git a/vendor/cuelang.org/go/internal/source/source.go b/vendor/cuelang.org/go/internal/source/source.go
new file mode 100644
index 0000000000..384484449e
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/source/source.go
@@ -0,0 +1,53 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package source contains utility functions that standardize reading source
+// bytes across cue packages.
+package source
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+// Read loads the source bytes for the given arguments. If src != nil,
+// Read converts src to a []byte if possible; otherwise it returns an
+// error. If src == nil, readSource returns the result of reading the file
+// specified by filename.
+//
+func Read(filename string, src interface{}) ([]byte, error) {
+ if src != nil {
+ switch s := src.(type) {
+ case string:
+ return []byte(s), nil
+ case []byte:
+ return s, nil
+ case *bytes.Buffer:
+ // is io.Reader, but src is already available in []byte form
+ if s != nil {
+ return s.Bytes(), nil
+ }
+ case io.Reader:
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, s); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ }
+ return nil, fmt.Errorf("invalid source type %T", src)
+ }
+ return ioutil.ReadFile(filename)
+}
diff --git a/vendor/cuelang.org/go/internal/task/task.go b/vendor/cuelang.org/go/internal/task/task.go
new file mode 100644
index 0000000000..3fb6b5085a
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/task/task.go
@@ -0,0 +1,152 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package task provides a registry for tasks to be used by commands.
+package task
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/value"
+)
+
+// A Context provides context for running a task.
+type Context struct {
+ Context context.Context
+ Stdin io.Reader
+ Stdout io.Writer
+ Stderr io.Writer
+ Obj cue.Value
+ Err errors.Error
+}
+
+func (c *Context) Lookup(field string) cue.Value {
+ f := c.Obj.Lookup(field)
+ if !f.Exists() {
+ c.addErr(f, nil, "could not find field %q", field)
+ return cue.Value{}
+ }
+ if err := f.Err(); err != nil {
+ c.Err = errors.Append(c.Err, errors.Promote(err, "lookup"))
+ }
+ return f
+}
+
+func (c *Context) Int64(field string) int64 {
+ f := c.Obj.Lookup(field)
+ value, err := f.Int64()
+ if err != nil {
+ c.addErr(f, err, "invalid integer argument")
+ return 0
+ }
+ return value
+}
+
+func (c *Context) String(field string) string {
+ f := c.Obj.Lookup(field)
+ value, err := f.String()
+ if err != nil {
+ c.addErr(f, err, "invalid string argument")
+ return ""
+ }
+ return value
+}
+
+func (c *Context) Bytes(field string) []byte {
+ f := c.Obj.Lookup(field)
+ value, err := f.Bytes()
+ if err != nil {
+ c.addErr(f, err, "invalid bytes argument")
+ return nil
+ }
+ return value
+}
+
+func (c *Context) addErr(v cue.Value, wrap error, format string, args ...interface{}) {
+
+ err := &taskError{
+ task: c.Obj,
+ v: v,
+ Message: errors.NewMessage(format, args),
+ }
+ c.Err = errors.Append(c.Err, errors.Wrap(err, wrap))
+}
+
+// taskError wraps some error values to retain position information about the
+// error.
+type taskError struct {
+ task cue.Value
+ v cue.Value
+ errors.Message
+}
+
+var _ errors.Error = &taskError{}
+
+func (t *taskError) Path() (a []string) {
+ for _, x := range t.v.Path().Selectors() {
+ a = append(a, x.String())
+ }
+ return a
+}
+
+func (t *taskError) Position() token.Pos {
+ return t.task.Pos()
+}
+
+func (t *taskError) InputPositions() (a []token.Pos) {
+ _, nx := value.ToInternal(t.v)
+
+ for _, x := range nx.Conjuncts {
+ if src := x.Source(); src != nil {
+ a = append(a, src.Pos())
+ }
+ }
+ return a
+}
+
+// A RunnerFunc creates a Runner.
+type RunnerFunc func(v cue.Value) (Runner, error)
+
+// A Runner defines a command type.
+type Runner interface {
+ // Init is called with the original configuration before any task is run.
+ // As a result, the configuration may be incomplete, but allows some
+ // validation before tasks are kicked off.
+ // Init(v cue.Value)
+
+ // Runner runs given the current value and returns a new value which is to
+ // be unified with the original result.
+ Run(ctx *Context) (results interface{}, err error)
+}
+
+// Register registers a task for cue commands.
+func Register(key string, f RunnerFunc) {
+ runners.Store(key, f)
+}
+
+// Lookup returns the RunnerFunc for a key.
+func Lookup(key string) RunnerFunc {
+ v, ok := runners.Load(key)
+ if !ok {
+ return nil
+ }
+ return v.(RunnerFunc)
+}
+
+var runners sync.Map
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml
new file mode 100644
index 0000000000..8da58fbf6f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/METADATA b/vendor/cuelang.org/go/internal/third_party/yaml/METADATA
new file mode 100644
index 0000000000..746edf0614
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/METADATA
@@ -0,0 +1,15 @@
+name: "go-yaml"
+description:
+ "Heavily modified version of go-yaml files. Most of the original "
+ "functionality is gone and replaced with CUE-specific code."
+
+third_party {
+ url {
+ type: GIT
+ value: "https://github.com/go-yaml/yaml"
+ }
+ version: "v2.2.1"
+ last_upgrade_date { year: 2018 month: 10 day: 24 }
+ license_type: NOTICE
+ local_modifications: "Replace Go-struct with CUE mapping."
+}
\ No newline at end of file
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE b/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE
new file mode 100644
index 0000000000..866d74a7ad
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/README.md b/vendor/cuelang.org/go/internal/third_party/yaml/README.md
new file mode 100644
index 0000000000..ea39618bbd
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/README.md
@@ -0,0 +1,11 @@
+# YAML reader for CUE
+
+This yaml parser is a heavily modified version of Canonical's go-yaml parser,
+which in turn is a port of the [libyaml](http://pyyaml.org/wiki/LibYAML) parser.
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/apic.go b/vendor/cuelang.org/go/internal/third_party/yaml/apic.go
new file mode 100644
index 0000000000..9cf9005f78
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/apic.go
@@ -0,0 +1,740 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t, filename string) bool {
+ *parser = yaml_parser_t{
+ filename: filename,
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+//// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/decode.go b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go
new file mode 100644
index 0000000000..d3ec965ea8
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go
@@ -0,0 +1,776 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ startPos yaml_mark_t
+ endPos yaml_mark_t
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ info *token.File
+ last *node
+ doneInit bool
+}
+
+func readSource(filename string, src interface{}) ([]byte, error) {
+ if src != nil {
+ switch s := src.(type) {
+ case string:
+ return []byte(s), nil
+ case []byte:
+ return s, nil
+ case *bytes.Buffer:
+ // is io.Reader, but src is already available in []byte form
+ if s != nil {
+ return s.Bytes(), nil
+ }
+ case io.Reader:
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, s); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ }
+ return nil, errors.New("invalid source")
+ }
+ return ioutil.ReadFile(filename)
+}
+
+func newParser(filename string, src interface{}) (*parser, error) {
+ b, err := readSource(filename, src)
+ if err != nil {
+ return nil, err
+ }
+ info := token.NewFile(filename, -1, len(b)+2)
+ info.SetLinesForContent(b)
+ p := parser{info: info}
+ if !yaml_parser_initialize(&p.parser, filename) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p, nil
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ p.failf(p.event.end_mark.line, "attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error != yaml_SCANNER_ERROR {
+ line--
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line - 1
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ p.failf(line, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ n := &node{
+ kind: kind,
+ startPos: p.event.start_mark,
+ endPos: p.event.end_mark,
+ }
+ return n
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ p.failf(n.startPos.line, "unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ if len(n.children) > 0 {
+ n.endPos = n.children[len(n.children)-1].endPos
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ if len(n.children) > 0 {
+ n.endPos = n.children[len(n.children)-1].endPos
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ p *parser
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ prev token.Pos
+ lastNode ast.Node
+ forceNewline bool
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(p *parser) *decoder {
+ d := &decoder{p: p, mapType: defaultMapType}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string) string {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ msg := fmt.Sprintf("line %d: cannot unmarshal %s%s", n.startPos.line+1, shortTag(tag), value)
+ d.terrors = append(d.terrors, msg)
+ return msg
+}
+
+func (d *decoder) unmarshal(n *node) (node ast.Expr) {
+ switch n.kind {
+ case documentNode:
+ node = d.document(n)
+ case aliasNode:
+ node = d.alias(n)
+ default:
+ switch n.kind {
+ case scalarNode:
+ node = d.scalar(n)
+ case mappingNode:
+ node = d.mapping(n)
+ case sequenceNode:
+ node = d.sequence(n)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ }
+ return node
+}
+
+func (d *decoder) attachDocComments(m yaml_mark_t, pos int8, expr ast.Node) {
+ comments := []*ast.Comment{}
+ line := 0
+ for len(d.p.parser.comments) > 0 {
+ c := d.p.parser.comments[0]
+ if c.mark.index >= m.index {
+ break
+ }
+ comments = append(comments, &ast.Comment{
+ Slash: d.pos(c.mark),
+ Text: "//" + c.text[1:],
+ })
+ d.p.parser.comments = d.p.parser.comments[1:]
+ line = c.mark.line
+ }
+ if len(comments) > 0 {
+ expr.AddComment(&ast.CommentGroup{
+ Doc: pos == 0 && line+1 == m.line,
+ Position: pos,
+ List: comments,
+ })
+ }
+}
+
+func (d *decoder) attachLineComment(m yaml_mark_t, pos int8, expr ast.Node) {
+ if len(d.p.parser.comments) == 0 {
+ return
+ }
+ c := d.p.parser.comments[0]
+ if c.mark.index == m.index {
+ comment := &ast.Comment{
+ Slash: d.pos(c.mark),
+ Text: "//" + c.text[1:],
+ }
+ expr.AddComment(&ast.CommentGroup{
+ Line: true,
+ Position: pos,
+ List: []*ast.Comment{comment},
+ })
+ }
+}
+
+func (d *decoder) pos(m yaml_mark_t) token.Pos {
+ pos := d.p.info.Pos(m.index+1, token.NoRelPos)
+
+ if d.forceNewline {
+ d.forceNewline = false
+ pos = pos.WithRel(token.Newline)
+ } else if d.prev.IsValid() {
+ c := pos.Position()
+ p := d.prev.Position()
+ switch {
+ case c.Line-p.Line >= 2:
+ pos = pos.WithRel(token.NewSection)
+ case c.Line-p.Line == 1:
+ pos = pos.WithRel(token.Newline)
+ case c.Column-p.Column > 0:
+ pos = pos.WithRel(token.Blank)
+ default:
+ pos = pos.WithRel(token.NoSpace)
+ }
+ if pos.Before(d.prev) {
+ return token.NoPos
+ }
+ }
+
+ d.prev = pos
+ return pos
+}
+
+func (d *decoder) absPos(m yaml_mark_t) token.Pos {
+ return d.p.info.Pos(m.index+1, token.NoRelPos)
+}
+
+func (d *decoder) start(n *node) token.Pos {
+ if n.startPos == n.endPos {
+ return token.NoPos
+ }
+ return d.pos(n.startPos)
+}
+
+func (d *decoder) ident(n *node, name string) *ast.Ident {
+ return &ast.Ident{
+ NamePos: d.pos(n.startPos),
+ Name: name,
+ }
+}
+
+func (d *decoder) document(n *node) ast.Expr {
+ if len(n.children) == 1 {
+ d.doc = n
+ return d.unmarshal(n.children[0])
+ }
+ return &ast.BottomLit{} // TODO: more informatives
+}
+
+func (d *decoder) alias(n *node) ast.Expr {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ d.p.failf(n.startPos.line, "anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ node := d.unmarshal(n.alias)
+ delete(d.aliases, n)
+ return node
+}
+
+var zeroValue reflect.Value
+
+func (d *decoder) scalar(n *node) ast.Expr {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = d.resolve(n)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ d.p.failf(n.startPos.line, "!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return &ast.BasicLit{
+ ValuePos: d.start(n).WithRel(token.Blank),
+ Kind: token.NULL,
+ Value: "null",
+ }
+ }
+ switch tag {
+ // TODO: use parse literal or parse expression instead.
+ case yaml_TIMESTAMP_TAG:
+ return &ast.BasicLit{
+ ValuePos: d.start(n),
+ Kind: token.STRING,
+ Value: literal.String.Quote(n.value),
+ }
+
+ case yaml_STR_TAG:
+ return &ast.BasicLit{
+ ValuePos: d.start(n),
+ Kind: token.STRING,
+ Value: quoteString(n.value),
+ }
+
+ case yaml_BINARY_TAG:
+ return &ast.BasicLit{
+ ValuePos: d.start(n),
+ Kind: token.STRING,
+ Value: literal.Bytes.Quote(resolved.(string)),
+ }
+
+ case yaml_BOOL_TAG:
+ tok := token.FALSE
+ str := "false"
+ if b, _ := resolved.(bool); b {
+ tok = token.TRUE
+ str = "true"
+ }
+ return &ast.BasicLit{
+ ValuePos: d.start(n),
+ Kind: tok,
+ Value: str,
+ }
+
+ case yaml_INT_TAG:
+ // Convert YAML octal to CUE octal. If YAML accepted an invalid
+ // integer, just convert it as well to ensure CUE will fail.
+ s := n.value
+ if len(s) > 1 && s[0] == '0' && s[1] <= '9' {
+ s = "0o" + s[1:]
+ }
+ return d.makeNum(n, s, token.INT)
+
+ case yaml_FLOAT_TAG:
+ value := n.value
+ if f, ok := resolved.(float64); ok {
+ switch {
+ case math.IsInf(f, -1),
+ math.IsInf(f, 1),
+ math.IsNaN(f):
+ value = fmt.Sprint(f)
+ }
+ }
+ if n.tag != "" {
+ if p := strings.IndexAny(value, ".eEiInN"); p == -1 {
+ // TODO: float(v) when we have conversions
+ value = fmt.Sprintf("float & %s", value)
+ }
+ }
+ return d.makeNum(n, value, token.FLOAT)
+
+ case yaml_NULL_TAG:
+ return &ast.BasicLit{
+ ValuePos: d.start(n).WithRel(token.Blank),
+ Kind: token.NULL,
+ Value: "null",
+ }
+ }
+ err := &ast.BottomLit{
+ Bottom: d.pos(n.startPos),
+ }
+ comment := &ast.Comment{
+ Slash: d.start(n),
+ Text: "// " + d.terror(n, tag),
+ }
+ err.AddComment(&ast.CommentGroup{
+ Line: true,
+ Position: 1,
+ List: []*ast.Comment{comment},
+ })
+ return err
+}
+
+func (d *decoder) label(n *node) ast.Label {
+ pos := d.pos(n.startPos)
+
+ switch x := d.scalar(n).(type) {
+ case *ast.BasicLit:
+ if x.Kind == token.STRING {
+ if ast.IsValidIdent(n.value) && !internal.IsDefOrHidden(n.value) {
+ return &ast.Ident{
+ NamePos: pos,
+ Name: n.value,
+ }
+ }
+ ast.SetPos(x, pos)
+ return x
+ }
+
+ return &ast.BasicLit{
+ ValuePos: pos,
+ Kind: token.STRING,
+ Value: literal.Label.Quote(x.Value),
+ }
+
+ default:
+ d.p.failf(n.startPos.line, "invalid label: %q", n.value)
+ }
+
+ return &ast.BasicLit{
+ ValuePos: pos,
+ Kind: token.STRING,
+ Value: "",
+ }
+}
+
+func (d *decoder) makeNum(n *node, val string, kind token.Token) (expr ast.Expr) {
+ minuses := 0
+ for ; val[0] == '-'; val = val[1:] {
+ minuses++
+ }
+ expr = &ast.BasicLit{
+ ValuePos: d.start(n), // + minuses.Pos(),
+ Kind: kind,
+ Value: val,
+ }
+ if minuses > 0 {
+ expr = &ast.UnaryExpr{
+ OpPos: d.start(n),
+ Op: token.SUB,
+ X: expr,
+ }
+ }
+ return expr
+}
+
+// quoteString converts a string to a CUE multiline string if needed.
+func quoteString(s string) string {
+ lines := []string{}
+ last := 0
+ for i, c := range s {
+ if c == '\n' {
+ lines = append(lines, s[last:i])
+ last = i + 1
+ }
+ if c == '\r' {
+ goto quoted
+ }
+ }
+ lines = append(lines, s[last:])
+ if len(lines) >= 2 {
+ buf := []byte{}
+ buf = append(buf, `"""`+"\n"...)
+ for _, l := range lines {
+ if l == "" {
+ // no indentation for empty lines
+ buf = append(buf, '\n')
+ continue
+ }
+ buf = append(buf, '\t')
+ p := len(buf)
+ buf = strconv.AppendQuote(buf, l)
+ // remove quotes
+ buf[p] = '\t'
+ buf[len(buf)-1] = '\n'
+ }
+ buf = append(buf, "\t\t"+`"""`...)
+ return string(buf)
+ }
+quoted:
+ return literal.String.Quote(s)
+}
+
+func (d *decoder) sequence(n *node) ast.Expr {
+ list := &ast.ListLit{}
+ list.Lbrack = d.pos(n.startPos).WithRel(token.Blank)
+ switch ln := len(n.children); ln {
+ case 0:
+ d.prev = list.Lbrack
+ default:
+ d.prev = d.pos(n.children[ln-1].endPos)
+ }
+ list.Rbrack = d.pos(n.endPos)
+
+ noNewline := true
+ single := d.isOneLiner(n.startPos, n.endPos)
+ for _, c := range n.children {
+ d.forceNewline = !single
+ elem := d.unmarshal(c)
+ list.Elts = append(list.Elts, elem)
+ _, noNewline = elem.(*ast.StructLit)
+ }
+ if !single && !noNewline {
+ list.Rbrack = list.Rbrack.WithRel(token.Newline)
+ }
+ return list
+}
+
+func (d *decoder) isOneLiner(start, end yaml_mark_t) bool {
+ s := d.absPos(start).Position()
+ e := d.absPos(end).Position()
+ return s.Line == e.Line
+}
+
+func (d *decoder) mapping(n *node) ast.Expr {
+ newline := d.forceNewline
+
+ structure := &ast.StructLit{}
+ d.insertMap(n, structure, false)
+
+ // NOTE: we currently translate YAML without curly braces to CUE with
+ // curly braces, even for single elements. Removing the following line
+ // would generate the folded form.
+ structure.Lbrace = d.absPos(n.startPos).WithRel(token.NoSpace)
+ structure.Rbrace = d.absPos(n.endPos).WithRel(token.Newline)
+ if d.isOneLiner(n.startPos, n.endPos) && !newline {
+ if len(structure.Elts) != 1 {
+ structure.Lbrace = d.absPos(n.startPos).WithRel(token.Blank)
+ }
+ if len(structure.Elts) != 1 || structure.Elts[0].Pos().RelPos() < token.Newline {
+ structure.Rbrace = structure.Rbrace.WithRel(token.Blank)
+ }
+ }
+ return structure
+}
+
+func (d *decoder) insertMap(n *node, m *ast.StructLit, merge bool) {
+ l := len(n.children)
+outer:
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ merge = true
+ d.merge(n.children[i+1], m)
+ continue
+ }
+ switch n.children[i].kind {
+ case mappingNode:
+ d.p.failf(n.startPos.line, "invalid map key: map")
+ case sequenceNode:
+ d.p.failf(n.startPos.line, "invalid map key: sequence")
+ }
+
+ field := &ast.Field{}
+ d.attachDocComments(n.children[i].startPos, 0, field)
+
+ label := d.label(n.children[i])
+ field.Label = label
+ d.attachLineComment(n.children[i].endPos, 1, label)
+
+ if merge {
+ key := labelStr(label)
+ for _, decl := range m.Elts {
+ f := decl.(*ast.Field)
+ name, _, err := ast.LabelName(f.Label)
+ if err == nil && name == key {
+ f.Value = d.unmarshal(n.children[i+1])
+ continue outer
+ }
+ }
+ }
+
+ value := d.unmarshal(n.children[i+1])
+ field.Value = value
+ d.attachDocComments(n.children[i+1].startPos, 0, value)
+ d.attachLineComment(n.children[i+1].endPos, 10, value)
+
+ m.Elts = append(m.Elts, field)
+ }
+}
+
+func labelStr(l ast.Label) string {
+ switch x := l.(type) {
+ case *ast.Ident:
+ return x.Name
+ case *ast.BasicLit:
+ s, _ := strconv.Unquote(x.Value)
+ return s
+ }
+ return ""
+}
+
+func (d *decoder) failWantMap(n *node) {
+ d.p.failf(n.startPos.line, "map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, m *ast.StructLit) {
+ switch n.kind {
+ case mappingNode:
+ d.insertMap(n, m, true)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ d.failWantMap(n)
+ }
+ d.insertMap(an, m, true)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ d.failWantMap(n)
+ }
+ d.insertMap(an, m, true)
+ continue
+ } else if ni.kind != mappingNode {
+ d.failWantMap(n)
+ }
+ d.insertMap(ni, m, true)
+ }
+ default:
+ d.failWantMap(n)
+ }
+}
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go b/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go
new file mode 100644
index 0000000000..aaf7f26df5
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go
@@ -0,0 +1,1101 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+func add_comment(parser *yaml_parser_t, m yaml_mark_t, text string) {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ mark: m,
+ text: text,
+ })
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+// Emits exactly one event per call: a sequence entry (delegated to
+// yaml_parser_parse_node), an empty scalar for a '-' with no content, or
+// SEQUENCE-END when BLOCK-END is reached.
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// Consume the BLOCK-SEQUENCE-START token and remember where the
+		// collection began for error reporting.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		// Position for an empty scalar in case this '-' carries no node.
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			// "-\n-" style: the entry has no content, emit an empty scalar.
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		// Pop the saved state and the collection mark; the sequence is done.
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+//
+// An indentless sequence has no BLOCK-SEQUENCE-START/BLOCK-END wrapper, so it
+// ends at the first token that is not a BLOCK-ENTRY ('-').
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		// Position for an empty scalar in case this '-' carries no node.
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		// The '-' had no content: emit an empty scalar at its end mark.
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	// Not a BLOCK-ENTRY: the indentless sequence is over. The terminating
+	// token is NOT consumed here; it belongs to the enclosing construct.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+// Emits one event per call: the key node, an empty scalar when the key is
+// absent, or MAPPING-END at BLOCK-END.
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// Consume the BLOCK-MAPPING-START token and remember where the
+		// collection began for error reporting.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		// Position for an empty scalar in case the '?' has no key node.
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			// Empty key ("? :" or bare ':'): emit an empty scalar.
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		// Pop the saved state and the collection mark; the mapping is done.
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+// Emits the value node of the current key/value pair, or an empty scalar
+// when the value is missing (no ':' or a ':' with no content).
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		// Position for an empty scalar in case the ':' has no value node.
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		// ':' with no content: emit an empty scalar.
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	// No VALUE token at all: the key has no value, emit an empty scalar.
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+// Emits one event per call: the next entry node, an implicit single-pair
+// MAPPING-START when a KEY token appears inside the sequence, or
+// SEQUENCE-END at ']'.
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// Consume the FLOW-SEQUENCE-START token ('[') and remember where
+		// the collection began for error reporting.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			// After the first entry, a ',' separator is mandatory.
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			// "? key: value" inside a flow sequence starts an implicit
+			// single-pair mapping.
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// ']' reached: pop the saved state and collection mark.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+// Emits the key node of an implicit single-pair mapping inside a flow
+// sequence, or an empty scalar when the key is absent.
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	// No key node: consume the token and emit an empty scalar at its end.
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+// Emits the value node of an implicit single-pair mapping inside a flow
+// sequence, or an empty scalar when the value is absent.
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		// NOTE(review): the inner ':=' deliberately shadows token, so the
+		// empty-scalar fallthrough below is positioned at the token seen on
+		// entry (the ':') rather than at whatever follows it.
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+// Closes the implicit single-pair mapping opened by a KEY token inside a
+// flow sequence; no token is consumed.
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+// Emits one event per call: the next key node, an empty scalar for an
+// absent key, or MAPPING-END at '}'.
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// Consume the FLOW-MAPPING-START token ('{') and remember where
+		// the collection began for error reporting.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			// After the first entry, a ',' separator is mandatory.
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				// '?' with no key node: emit an empty scalar.
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			// A bare node with no KEY token: parse it as a key whose value
+			// is empty (the EMPTY_VALUE state emits the empty scalar).
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// '}' reached: pop the saved state and collection mark.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+// Emits the value node of the current flow-mapping entry. With empty=true
+// (the FLOW_MAPPING_EMPTY_VALUE state) it emits an empty scalar without
+// consuming any token.
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	// Missing value (no ':' or a ':' with no content): emit an empty scalar.
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+//
+// Used wherever the grammar permits a node to be omitted (missing key,
+// missing value, '-' with no content). Both marks are set to the same
+// position, so the event has zero width.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+// default_tag_directives are the implicit "!" and "!!" handles defined by
+// the YAML spec; they are registered after any explicit %TAG directives
+// (see yaml_parser_process_directives).
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+//
+// Consumes any %YAML and %TAG directive tokens preceding a document,
+// registers the tag handles on the parser (plus the defaults), and reports
+// what it found through the optional out-parameters. Duplicate %YAML
+// directives or a version other than 1.1 are errors.
+func yaml_parser_process_directives(parser *yaml_parser_t, version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			// Only YAML 1.1 is accepted by this parser.
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			// Explicit directives may not repeat a handle (allow_duplicates=false).
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	// Register the standard "!" and "!!" handles unless the document
+	// already redefined them (allow_duplicates=true makes that a no-op).
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+//
+// With allow_duplicates=false a repeated handle is a parse error; with
+// allow_duplicates=true (used for the built-in defaults) an existing handle
+// silently wins. The handle/prefix bytes are deep-copied before storing.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go b/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go
new file mode 100644
index 0000000000..b0c436c4a8
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+//
+// offset is the byte offset in the input stream where the problem occurred;
+// value is the offending byte/rune value, or -1 when not applicable. Always
+// returns false so callers can `return yaml_parser_set_reader_error(...)`.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks recognized by yaml_parser_determine_encoding. Note that
+// UTF-8 input without a BOM is also accepted (it is the default).
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
+//
+// A recognized BOM is consumed: raw_buffer_pos and offset advance past it so
+// it never reaches the decoder.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		// No BOM: default to UTF-8 without consuming anything.
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+//
+// Compacts any unread bytes to the front of raw_buffer and fills the rest
+// from the read handler. Sets parser.eof on io.EOF; any other read error
+// becomes a reader error.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less that the buffer size.
+//
+// Decodes UTF-8 or UTF-16 bytes from raw_buffer into parser.buffer as UTF-8,
+// validating each code point along the way. On (or past) EOF the buffer is
+// padded with NUL bytes so callers can always index `length` characters.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// for that to be the case, and there are tests
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				// Char. number range | UTF-8 octet sequence
+				// (hexadecimal) | (binary)
+				// --------------------+------------------------------------
+				// 0000 0000-0000 007F | 0xxxxxxx
+				// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					// Overlong encodings are rejected.
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think. Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character. However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				// high surrogate area (0xD800-0xDBFF)
+				// low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+				// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+				// W1 = 110110yyyyyyyyyy
+				// W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+			// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+			// | [#x10000-#x10FFFF] (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is Go) panicking; or C) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go b/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go
new file mode 100644
index 0000000000..9699ada1ac
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go
@@ -0,0 +1,256 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "nNtTfF~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func (d *decoder) resolve(n *node) (rtag string, out interface{}) {
+ tag := n.tag
+ in := n.value
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ d.p.failf(n.startPos.line, "cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+	// the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go b/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go
new file mode 100644
index 0000000000..94ace4bd5e
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go
@@ -0,0 +1,2719 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in details.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection from a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included into a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we don't determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	// (Push a fresh, empty simple-key slot for the new flow level.)
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+//
+// Pops the simple-key slot belonging to the level being left; a no-op at
+// flow level 0 (block context).
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+//
+// number is the absolute token number at which to insert the token, or -1
+// to append it at the end of the queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			// Convert the absolute token number into an index within the
+			// pending queue.
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	// (The base slot corresponds to the block context, flow level 0.)
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line, so the end mark points at the start of a fresh line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level (emits BLOCK-END for every open level).
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Directives terminate any open block structure, so the indentation stack
+// and the pending simple key are reset first.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	// Three skips: the indicator is exactly three characters ("---"/"...").
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token (the ',' separator inside a flow collection).
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token (the '-' sequence-entry indicator).
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token (the explicit '?' mapping-key indicator).
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token (the ':' indicator).
+//
+// If a simple key was saved earlier, a KEY token is retroactively inserted
+// into the queue at the saved position before the VALUE token is appended.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		// token_number is absolute; subtracting tokens_parsed yields the
+		// insertion index within the pending queue.
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys (a block scalar can never be a key).
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A quoted scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Reset the counters of line breaks and spaces consumed before the
+	// next token.
+	parser.linesSinceLast = 0
+	parser.spacesSinceLast = 0
+
+	// Until the next token is not found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			parser.spacesSinceLast++
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break, accumulating its text so it can
+		// be recorded via add_comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			m := parser.mark
+			parser.comment_buffer = parser.comment_buffer[:0]
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				p := parser.buffer_pos
+				skip(parser)
+				parser.comment_buffer = append(parser.comment_buffer,
+					parser.buffer[p:parser.buffer_pos]...)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+			add_comment(parser, m, string(parser.comment_buffer))
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+			parser.linesSinceLast++
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// A trailing comment is consumed and recorded via add_comment.
+	if parser.buffer[parser.buffer_pos] == '#' {
+		m := parser.mark
+		parser.comment_buffer = parser.comment_buffer[:0]
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			p := parser.buffer_pos
+			skip(parser)
+			parser.comment_buffer = append(parser.comment_buffer,
+				parser.buffer[p:parser.buffer_pos]...)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		add_comment(parser, m, string(parser.comment_buffer))
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002: \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.' separating major and minor.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+// max_number_length bounds each component of a %YAML version number to two
+// digits (so the value always fits in an int8).
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long (see max_number_length).
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle (e.g. "!yaml!"); directive mode requires the trailing '!'.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix (a tag URI, e.g. "tag:yaml.org,2002:").
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	// Publish results only after both parts scanned successfully.
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+// Scan an ALIAS or ANCHOR token ('*name' or '&name'); typ selects which
+// token is produced and which error context is reported.
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character ('*' or '&').
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+/*
+ * Scan a TAG token.
+ *
+ * Handles the three tag forms: verbatim '!<uri>', shorthand
+ * '!handle!suffix', and primary '!suffix'. The current character is
+ * expected to be '!' (the caller dispatched on it); the check below
+ * therefore looks at the character after it.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, handle.
+		// (scan_tag_handle always yields a leading '!', so handle[0] is safe.)
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle: '!', '!!', or '!name!'.
+//
+// directive is true when scanning inside a %TAG directive, where a handle
+// without the trailing '!' (other than the bare '!') is an error.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be a part of URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag URI (the suffix of a tag or the prefix of a %TAG directive).
+//
+// head, if non-empty, is a previously scanned fragment to prepend (its
+// leading '!' is dropped); '%'-escapes are decoded as they are read.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode an URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	// w == 1024 is a sentinel meaning "sequence length not yet known"; the
+	// first octet determines the real length via width().
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet ('%' plus two hex digits).
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct (10xxxxxx pattern).
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers past the three-character
+		// escape ('%XX').
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
// Scan a block scalar introduced by '|' (literal) or '>' (folded).
// The header's optional chomping ('+'/'-') and explicit indentation
// indicators are parsed first, then the scalar body is accumulated into
// a yaml_SCALAR_TOKEN.
func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
	// Eat the indicator '|' or '>'.
	start_mark := parser.mark
	skip(parser)

	// Scan the additional block scalar indicators.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	// Check for a chomping indicator.
	// chomping: -1 strip, 0 clip (default), +1 keep.
	var chomping, increment int
	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
		// Set the chomping method and eat the indicator.
		if parser.buffer[parser.buffer_pos] == '+' {
			chomping = +1
		} else {
			chomping = -1
		}
		skip(parser)

		// Check for an indentation indicator.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if is_digit(parser.buffer, parser.buffer_pos) {
			// Check that the indentation is greater than 0.
			if parser.buffer[parser.buffer_pos] == '0' {
				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
					start_mark, "found an indentation indicator equal to 0")
				return false
			}

			// Get the indentation level and eat the indicator.
			increment = as_digit(parser.buffer, parser.buffer_pos)
			skip(parser)
		}

	} else if is_digit(parser.buffer, parser.buffer_pos) {
		// Do the same as above, but in the opposite order:
		// indentation indicator first, then optional chomping indicator.

		if parser.buffer[parser.buffer_pos] == '0' {
			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found an indentation indicator equal to 0")
			return false
		}
		increment = as_digit(parser.buffer, parser.buffer_pos)
		skip(parser)

		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
			if parser.buffer[parser.buffer_pos] == '+' {
				chomping = +1
			} else {
				chomping = -1
			}
			skip(parser)
		}
	}

	// Eat whitespaces and comments to the end of the line.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}
	if parser.buffer[parser.buffer_pos] == '#' {
		// Capture the trailing comment on the header line.
		m := parser.mark
		parser.comment_buffer = parser.comment_buffer[:0]
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			p := parser.buffer_pos
			skip(parser)
			parser.comment_buffer = append(parser.comment_buffer,
				parser.buffer[p:parser.buffer_pos]...)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
		add_comment(parser, m, string(parser.comment_buffer))
	}

	// Check if we are at the end of the line.
	if !is_breakz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
			start_mark, "did not find expected comment or line break")
		return false
	}

	// Eat a line break.
	if is_break(parser.buffer, parser.buffer_pos) {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		skip_line(parser)
	}

	end_mark := parser.mark

	// Set the indentation level if it was specified explicitly in the header.
	var indent int
	if increment > 0 {
		if parser.indent >= 0 {
			indent = parser.indent + increment
		} else {
			indent = increment
		}
	}

	// Scan the leading line breaks and determine the indentation level if needed.
	var s, leading_break, trailing_breaks []byte
	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
		return false
	}

	// Scan the block scalar content.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var leading_blank, trailing_blank bool
	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
		// We are at the beginning of a non-empty line.

		// Is it a trailing whitespace?
		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Check if we need to fold the leading line break (folded style only,
		// and never around "more-indented" blank-led lines).
		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
			// Do we need to join the lines by space?
			if len(trailing_breaks) == 0 {
				s = append(s, ' ')
			}
		} else {
			s = append(s, leading_break...)
		}
		leading_break = leading_break[:0]

		// Append the remaining line breaks.
		s = append(s, trailing_breaks...)
		trailing_breaks = trailing_breaks[:0]

		// Is it a leading whitespace?
		leading_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Consume the current line.
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			s = read(parser, s)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}

		leading_break = read_line(parser, leading_break)

		// Eat the following indentation spaces and line breaks.
		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
			return false
		}
	}

	// Chomp the tail: clip (0) and keep (+1) retain the final line break;
	// keep additionally retains any trailing empty lines.
	if chomping != -1 {
		s = append(s, leading_break...)
	}
	if chomping == 1 {
		s = append(s, trailing_breaks...)
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_LITERAL_SCALAR_STYLE,
	}
	if !literal {
		token.style = yaml_FOLDED_SCALAR_STYLE
	}
	return true
}
+
// Scan indentation spaces and line breaks for a block scalar. Determine the
// indentation level if needed.
//
// *indent == 0 on entry means "auto-detect": the level is then derived from
// the maximum indentation seen on the leading empty lines, floored at
// parser.indent+1 (and at least 1). Consumed line breaks are appended to
// *breaks and *end_mark tracks the position after the last consumed break.
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
	*end_mark = parser.mark

	// Eat the indentation spaces and line breaks.
	max_indent := 0
	for {
		// Eat the indentation spaces (up to *indent once it is known).
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
		if parser.mark.column > max_indent {
			max_indent = parser.mark.column
		}

		// Check for a tab character messing the indentation.
		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found a tab character where an indentation space is expected")
		}

		// Have we found a non-empty line?
		if !is_break(parser.buffer, parser.buffer_pos) {
			break
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		// [Go] Should really be returning breaks instead.
		*breaks = read_line(parser, *breaks)
		*end_mark = parser.mark
	}

	// Determine the indentation level if needed.
	if *indent == 0 {
		*indent = max_indent
		if *indent < parser.indent+1 {
			*indent = parser.indent + 1
		}
		if *indent < 1 {
			*indent = 1
		}
	}
	return true
}
+
// Scan a quoted scalar: single-quoted when single is true, otherwise
// double-quoted (with full backslash escape processing). Line breaks
// inside the scalar are folded per the YAML flow-scalar rules.
func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
	// Eat the left quote.
	start_mark := parser.mark
	skip(parser)

	// Consume the content of the quoted scalar.
	var s, leading_break, trailing_breaks, whitespaces []byte
	for {
		// Check that there are no document indicators at the beginning of the line.
		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
			return false
		}

		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos+0] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos+0] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz(parser.buffer, parser.buffer_pos+3) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected document indicator")
			return false
		}

		// Check for EOF: a quoted scalar must be closed before end of stream.
		if is_z(parser.buffer, parser.buffer_pos) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected end of stream")
			return false
		}

		// Consume non-blank characters.
		leading_blanks := false
		for !is_blankz(parser.buffer, parser.buffer_pos) {
			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
				// It is an escaped single quote ('' -> ').
				s = append(s, '\'')
				skip(parser)
				skip(parser)

			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
				// It is a right single quote.
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
				// It is a right double quote.
				break

			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
				// It is an escaped line break.
				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
					return false
				}
				skip(parser)
				skip_line(parser)
				leading_blanks = true
				break

			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
				// It is an escape sequence.
				code_length := 0

				// Check the escape character.
				switch parser.buffer[parser.buffer_pos+1] {
				case '0':
					s = append(s, 0)
				case 'a':
					s = append(s, '\x07')
				case 'b':
					s = append(s, '\x08')
				case 't', '\t':
					s = append(s, '\x09')
				case 'n':
					s = append(s, '\x0A')
				case 'v':
					s = append(s, '\x0B')
				case 'f':
					s = append(s, '\x0C')
				case 'r':
					s = append(s, '\x0D')
				case 'e':
					s = append(s, '\x1B')
				case ' ':
					s = append(s, '\x20')
				case '"':
					s = append(s, '"')
				case '\'':
					s = append(s, '\'')
				case '\\':
					s = append(s, '\\')
				case 'N': // NEL (#x85)
					s = append(s, '\xC2')
					s = append(s, '\x85')
				case '_': // #xA0
					s = append(s, '\xC2')
					s = append(s, '\xA0')
				case 'L': // LS (#x2028)
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA8')
				case 'P': // PS (#x2029)
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA9')
				case 'x':
					code_length = 2
				case 'u':
					code_length = 4
				case 'U':
					code_length = 8
				default:
					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
						start_mark, "found unknown escape character")
					return false
				}

				// Eat the backslash and the escape character.
				skip(parser)
				skip(parser)

				// Consume an arbitrary escape code (\xXX, \uXXXX or \UXXXXXXXX).
				if code_length > 0 {
					var value int

					// Scan the character value.
					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
						return false
					}
					for k := 0; k < code_length; k++ {
						if !is_hex(parser.buffer, parser.buffer_pos+k) {
							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
								start_mark, "did not find expected hexdecimal number")
							return false
						}
						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
					}

					// Check the value (no surrogates, within Unicode range)
					// and write the character as UTF-8.
					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
							start_mark, "found invalid Unicode character escape code")
						return false
					}
					if value <= 0x7F {
						s = append(s, byte(value))
					} else if value <= 0x7FF {
						s = append(s, byte(0xC0+(value>>6)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else if value <= 0xFFFF {
						s = append(s, byte(0xE0+(value>>12)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else {
						s = append(s, byte(0xF0+(value>>18)))
						s = append(s, byte(0x80+((value>>12)&0x3F)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					}

					// Advance the pointer past the hex digits.
					for k := 0; k < code_length; k++ {
						skip(parser)
					}
				}
			} else {
				// It is a non-escaped non-blank character.
				s = read(parser, s)
			}
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
		}

		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		// Check if we are at the end of the scalar.
		if single {
			if parser.buffer[parser.buffer_pos] == '\'' {
				break
			}
		} else {
			if parser.buffer[parser.buffer_pos] == '"' {
				break
			}
		}

		// Consume blank characters.
		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer, parser.buffer_pos) {
				// Consume a space or a tab character.
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
					return false
				}

				// Check if it is a first line break.
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Join the whitespaces or fold line breaks.
		if leading_blanks {
			// Do we need to fold line breaks?
			if len(leading_break) > 0 && leading_break[0] == '\n' {
				if len(trailing_breaks) == 0 {
					s = append(s, ' ')
				} else {
					s = append(s, trailing_breaks...)
				}
			} else {
				s = append(s, leading_break...)
				s = append(s, trailing_breaks...)
			}
			trailing_breaks = trailing_breaks[:0]
			leading_break = leading_break[:0]
		} else {
			s = append(s, whitespaces...)
			whitespaces = whitespaces[:0]
		}
	}

	// Eat the right quote.
	skip(parser)
	end_mark := parser.mark

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
	}
	if !single {
		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	return true
}
+
// Scan a plain (unquoted) scalar. The scalar ends at a document
// indicator, a comment, a ': ' indicator, flow indicators when inside a
// flow collection, or a drop below the required indentation level.
// Interior line breaks are folded into spaces per the YAML rules.
func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {

	var s, leading_break, trailing_breaks, whitespaces []byte
	var leading_blanks bool
	var indent = parser.indent + 1

	start_mark := parser.mark
	end_mark := parser.mark

	// Consume the content of the plain scalar.
	for {
		// Check for a document indicator ("---" or "..." at column 0).
		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
			return false
		}
		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos+0] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos+0] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz(parser.buffer, parser.buffer_pos+3) {
			break
		}

		// Check for a comment.
		if parser.buffer[parser.buffer_pos] == '#' {
			break
		}

		// Consume non-blank characters.
		for !is_blankz(parser.buffer, parser.buffer_pos) {

			// Check for indicators that may end a plain scalar.
			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
				(parser.flow_level > 0 &&
					(parser.buffer[parser.buffer_pos] == ',' ||
						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
						parser.buffer[parser.buffer_pos] == '}')) {
				break
			}

			// Check if we need to join whitespaces and breaks.
			if leading_blanks || len(whitespaces) > 0 {
				if leading_blanks {
					// Do we need to fold line breaks?
					if leading_break[0] == '\n' {
						if len(trailing_breaks) == 0 {
							s = append(s, ' ')
						} else {
							s = append(s, trailing_breaks...)
						}
					} else {
						s = append(s, leading_break...)
						s = append(s, trailing_breaks...)
					}
					trailing_breaks = trailing_breaks[:0]
					leading_break = leading_break[:0]
					leading_blanks = false
				} else {
					s = append(s, whitespaces...)
					whitespaces = whitespaces[:0]
				}
			}

			// Copy the character.
			s = read(parser, s)

			end_mark = parser.mark
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
		}

		// Is it the end?
		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
			break
		}

		// Consume blank characters.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer, parser.buffer_pos) {

				// Check for tab characters that abuse indentation.
				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
						start_mark, "found a tab character that violates indentation")
					return false
				}

				// Consume a space or a tab character.
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
					return false
				}

				// Check if it is a first line break.
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Check indentation level: outside flow context the scalar ends
		// when the next line is not indented far enough.
		if parser.flow_level == 0 && parser.mark.column < indent {
			break
		}
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_PLAIN_SCALAR_STYLE,
	}

	// Note that we change the 'simple_key_allowed' flag.
	if leading_blanks {
		parser.simple_key_allowed = true
	}
	return true
}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go
new file mode 100644
index 0000000000..517761459c
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go
@@ -0,0 +1,369 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml // import "cuelang.org/go/internal/third_party/yaml"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "cuelang.org/go/cue/ast"
+)
+
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
// NOTE(review): retained from upstream go-yaml; not referenced within
// this file — confirm external use before removing.
type MapSlice []MapItem

// MapItem is an item in a MapSlice.
type MapItem struct {
	Key, Value interface{}
}

// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
	UnmarshalYAML(unmarshal func(interface{}) error) error
}

// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
	MarshalYAML() (interface{}, error)
}
+
// Unmarshal decodes the first document found within the in byte slice
// and returns it as a CUE syntax-tree expression.
//
// filename is used only for position information in error messages.
//
// Unlike upstream go-yaml, this fork does not decode into a
// caller-supplied Go value; it always builds an ast.Expr. If one or more
// values cannot be decoded, a *yaml.TypeError is returned with details
// for all missed values.
func Unmarshal(filename string, in []byte) (expr ast.Expr, err error) {
	return unmarshal(filename, in)
}
+
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
	strict    bool // NOTE(review): never consulted in this file; presumably strict-mode decoding — confirm before relying on it
	firstDone bool // true once the first document has been decoded (see Decode)
	parser    *parser
}
+
// NewDecoder returns a new decoder that reads from src, reporting
// positions against filename. See newParser for the accepted src types.
//
// The decoder introduces its own buffering and may read
// data from src beyond the YAML values requested.
func NewDecoder(filename string, src interface{}) (*Decoder, error) {
	d, err := newParser(filename, src)
	if err != nil {
		return nil, err
	}
	return &Decoder{parser: d}, nil
}
+
// Decode reads the next YAML-encoded value from its input and returns it
// as a CUE expression. It returns io.EOF if there are no more values in
// the stream.
//
// See the documentation for Unmarshal for details about the conversion of
// YAML into a CUE expression.
func (dec *Decoder) Decode() (expr ast.Expr, err error) {
	d := newDecoder(dec.parser)
	// Recover yamlError panics raised by the parser into err.
	defer handleErr(&err)
	node := dec.parser.parse()
	if node == nil {
		// End of stream. For a completely empty stream (nothing decoded
		// yet) an explicit null expression accompanies io.EOF.
		if !dec.firstDone {
			expr = ast.NewNull()
		}
		return expr, io.EOF
	}
	dec.firstDone = true
	expr = d.unmarshal(node)
	if len(d.terrors) > 0 {
		return nil, &TypeError{d.terrors}
	}
	return expr, nil
}
+
// unmarshal parses the first document of in (positions reported against
// filename) and converts it to a CUE expression. Parser panics carrying
// a yamlError are recovered by handleErr and surfaced as err.
func unmarshal(filename string, in []byte) (expr ast.Expr, err error) {
	defer handleErr(&err)
	p, err := newParser(filename, in)
	if err != nil {
		return nil, err
	}
	defer p.destroy()
	node := p.parse()
	d := newDecoder(p)
	if node != nil {
		expr = d.unmarshal(node)
	}
	if len(d.terrors) > 0 {
		return nil, &TypeError{d.terrors}
	}
	return expr, nil
}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
// yamlError wraps an error for transport via panic/recover between the
// parser internals (see failf) and the public API surface (handleErr).
type yamlError struct {
	err error
}
+
+func (p *parser) failf(line int, format string, args ...interface{}) {
+ where := p.parser.filename + ":"
+ line++
+ where += strconv.Itoa(line) + ": "
+ panic(yamlError{fmt.Errorf(where+format, args...)})
+}
+
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
	Errors []string
}

// Error joins the individual messages into a single indented report.
func (e *TypeError) Error() string {
	return "yaml: unmarshal errors:\n  " + strings.Join(e.Errors, "\n  ")
}
+
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

// The code in this section was copied from mgo/bson.

// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
	FieldsMap  map[string]fieldInfo
	FieldsList []fieldInfo

	// InlineMap is the number of the field in the struct that
	// contains an ,inline map, or -1 if there's none.
	InlineMap int
}

// fieldInfo describes how a single struct field is (un)marshaled.
type fieldInfo struct {
	Key       string // map key used for the field
	Num       int    // field index within the struct
	OmitEmpty bool   // ",omitempty" was present in the tag
	Flow      bool   // ",flow" was present in the tag
	// Id holds the unique field identifier, so we can cheaply
	// check for field duplicates without maintaining an extra map.
	Id int

	// Inline holds the field index path if the field is part of an inlined struct.
	Inline []int
}

// structMap caches the computed structInfo per struct type;
// fieldMapMutex guards it for concurrent use.
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex

// getStructInfo returns the (cached) field-serialization details for the
// struct type st. It parses each exported field's "yaml" tag, honoring
// the "-" skip marker and the omitempty/flow/inline options, and returns
// an error for unsupported flags or conflicting keys.
func getStructInfo(st reflect.Type) (*structInfo, error) {
	// Fast path: return the cached info when available.
	fieldMapMutex.RLock()
	sinfo, found := structMap[st]
	fieldMapMutex.RUnlock()
	if found {
		return sinfo, nil
	}

	n := st.NumField()
	fieldsMap := make(map[string]fieldInfo)
	fieldsList := make([]fieldInfo, 0, n)
	inlineMap := -1
	for i := 0; i != n; i++ {
		field := st.Field(i)
		if field.PkgPath != "" && !field.Anonymous {
			continue // Private field
		}

		info := fieldInfo{Num: i}

		// Legacy mgo/bson behavior: a struct tag without any colon is
		// treated as the key itself.
		tag := field.Tag.Get("yaml")
		if tag == "" && !strings.Contains(string(field.Tag), ":") {
			tag = string(field.Tag)
		}
		if tag == "-" {
			continue
		}

		inline := false
		fields := strings.Split(tag, ",")
		if len(fields) > 1 {
			for _, flag := range fields[1:] {
				switch flag {
				case "omitempty":
					info.OmitEmpty = true
				case "flow":
					info.Flow = true
				case "inline":
					inline = true
				default:
					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
				}
			}
			tag = fields[0]
		}

		if inline {
			switch field.Type.Kind() {
			case reflect.Map:
				if inlineMap >= 0 {
					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
				}
				if field.Type.Key() != reflect.TypeOf("") {
					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
				}
				inlineMap = info.Num
			case reflect.Struct:
				// Recursively flatten the inlined struct's fields into
				// this struct's field set.
				sinfo, err := getStructInfo(field.Type)
				if err != nil {
					return nil, err
				}
				for _, finfo := range sinfo.FieldsList {
					if _, found := fieldsMap[finfo.Key]; found {
						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
						return nil, errors.New(msg)
					}
					if finfo.Inline == nil {
						finfo.Inline = []int{i, finfo.Num}
					} else {
						finfo.Inline = append([]int{i}, finfo.Inline...)
					}
					finfo.Id = len(fieldsList)
					fieldsMap[finfo.Key] = finfo
					fieldsList = append(fieldsList, finfo)
				}
			default:
				//return nil, errors.New("Option ,inline needs a struct value or map field")
				return nil, errors.New("Option ,inline needs a struct value field")
			}
			continue
		}

		if tag != "" {
			info.Key = tag
		} else {
			info.Key = strings.ToLower(field.Name)
		}

		if _, found = fieldsMap[info.Key]; found {
			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
			return nil, errors.New(msg)
		}

		info.Id = len(fieldsList)
		fieldsList = append(fieldsList, info)
		fieldsMap[info.Key] = info
	}

	sinfo = &structInfo{
		FieldsMap:  fieldsMap,
		FieldsList: fieldsList,
		InlineMap:  inlineMap,
	}

	// Publish to the cache. A racing goroutine may overwrite with an
	// equivalent value, which is harmless.
	fieldMapMutex.Lock()
	structMap[st] = sinfo
	fieldMapMutex.Unlock()
	return sinfo, nil
}
+
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
	IsZero() bool
}

// isZero reports whether v holds its type's zero value, delegating to
// the IsZeroer interface when the underlying value implements it.
func isZero(v reflect.Value) bool {
	k := v.Kind()
	if z, ok := v.Interface().(IsZeroer); ok {
		// Guard against calling IsZero through a nil pointer/interface.
		if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
			return true
		}
		return z.IsZero()
	}
	switch k {
	case reflect.String, reflect.Slice, reflect.Map:
		return v.Len() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		// A struct is zero when every exported field is zero.
		vt := v.Type()
		for i := v.NumField() - 1; i >= 0; i-- {
			if vt.Field(i).PkgPath != "" {
				continue // unexported field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go b/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go
new file mode 100644
index 0000000000..46ce46249b
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go
@@ -0,0 +1,752 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
// The version directive data (%YAML).
type yaml_version_directive_t struct {
	major int8 // The major version number.
	minor int8 // The minor version number.
}

// The tag directive data (%TAG).
type yaml_tag_directive_t struct {
	handle []byte // The tag handle.
	prefix []byte // The tag prefix.
}

// yaml_encoding_t identifies the character encoding of a YAML stream.
type yaml_encoding_t int

// The stream encoding.
const (
	// Let the parser choose the encoding.
	yaml_ANY_ENCODING yaml_encoding_t = iota

	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)

// yaml_break_t identifies a line-break convention.
type yaml_break_t int

// Line break types.
const (
	// Let the parser choose the break type.
	yaml_ANY_BREAK yaml_break_t = iota

	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)

// yaml_error_type_t classifies errors raised by the parser and emitter.
type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
	// No error is produced.
	yaml_NO_ERROR yaml_error_type_t = iota

	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
	yaml_READER_ERROR   // Cannot read or decode the input stream.
	yaml_SCANNER_ERROR  // Cannot scan the input stream.
	yaml_PARSER_ERROR   // Cannot parse the input stream.
	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
	yaml_WRITER_ERROR   // Cannot write to the output stream.
	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)

// The pointer position.
type yaml_mark_t struct {
	index  int // The position index.
	line   int // The position line.
	column int // The position column.
}
+
// Node Styles

// yaml_style_t is the common underlying type for the scalar, sequence,
// and mapping style enumerations below.
type yaml_style_t int8

// yaml_scalar_style_t identifies how a scalar is presented.
type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota

	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
)

// yaml_sequence_style_t identifies how a sequence is presented.
type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
)

// yaml_mapping_style_t identifies how a mapping is presented.
type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
)
+
// Tokens

// yaml_token_type_t identifies the type of a scanner token.
type yaml_token_type_t int

// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)
+
// String returns the constant name of the token type for debugging, or
// the empty string for an unknown value.
func (tt yaml_token_type_t) String() string {
	switch tt {
	case yaml_NO_TOKEN:
		return "yaml_NO_TOKEN"
	case yaml_STREAM_START_TOKEN:
		return "yaml_STREAM_START_TOKEN"
	case yaml_STREAM_END_TOKEN:
		return "yaml_STREAM_END_TOKEN"
	case yaml_VERSION_DIRECTIVE_TOKEN:
		return "yaml_VERSION_DIRECTIVE_TOKEN"
	case yaml_TAG_DIRECTIVE_TOKEN:
		return "yaml_TAG_DIRECTIVE_TOKEN"
	case yaml_DOCUMENT_START_TOKEN:
		return "yaml_DOCUMENT_START_TOKEN"
	case yaml_DOCUMENT_END_TOKEN:
		return "yaml_DOCUMENT_END_TOKEN"
	case yaml_BLOCK_SEQUENCE_START_TOKEN:
		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
	case yaml_BLOCK_MAPPING_START_TOKEN:
		return "yaml_BLOCK_MAPPING_START_TOKEN"
	case yaml_BLOCK_END_TOKEN:
		return "yaml_BLOCK_END_TOKEN"
	case yaml_FLOW_SEQUENCE_START_TOKEN:
		return "yaml_FLOW_SEQUENCE_START_TOKEN"
	case yaml_FLOW_SEQUENCE_END_TOKEN:
		return "yaml_FLOW_SEQUENCE_END_TOKEN"
	case yaml_FLOW_MAPPING_START_TOKEN:
		return "yaml_FLOW_MAPPING_START_TOKEN"
	case yaml_FLOW_MAPPING_END_TOKEN:
		return "yaml_FLOW_MAPPING_END_TOKEN"
	case yaml_BLOCK_ENTRY_TOKEN:
		return "yaml_BLOCK_ENTRY_TOKEN"
	case yaml_FLOW_ENTRY_TOKEN:
		return "yaml_FLOW_ENTRY_TOKEN"
	case yaml_KEY_TOKEN:
		return "yaml_KEY_TOKEN"
	case yaml_VALUE_TOKEN:
		return "yaml_VALUE_TOKEN"
	case yaml_ALIAS_TOKEN:
		return "yaml_ALIAS_TOKEN"
	case yaml_ANCHOR_TOKEN:
		return "yaml_ANCHOR_TOKEN"
	case yaml_TAG_TOKEN:
		return "yaml_TAG_TOKEN"
	case yaml_SCALAR_TOKEN:
		return "yaml_SCALAR_TOKEN"
	}
	return ""
}
+
// The token structure. Which fields are meaningful depends on typ,
// as noted on each field.
type yaml_token_t struct {
	// The token type.
	typ yaml_token_type_t

	// The start/end of the token.
	start_mark, end_mark yaml_mark_t

	// The stream encoding (for yaml_STREAM_START_TOKEN).
	encoding yaml_encoding_t

	// The alias/anchor/scalar value or tag/tag directive handle
	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
	value []byte

	// The tag suffix (for yaml_TAG_TOKEN).
	suffix []byte

	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
	prefix []byte

	// The scalar style (for yaml_SCALAR_TOKEN).
	style yaml_scalar_style_t

	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
	major, minor int8
}
+
// Events

// yaml_event_type_t identifies the type of a parser event.
type yaml_event_type_t int8

// Event types.
const (
	// An empty event.
	yaml_NO_EVENT yaml_event_type_t = iota

	yaml_STREAM_START_EVENT   // A STREAM-START event.
	yaml_STREAM_END_EVENT     // A STREAM-END event.
	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
	yaml_ALIAS_EVENT          // An ALIAS event.
	yaml_SCALAR_EVENT         // A SCALAR event.
	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
)

// eventStrings maps each event type to a human-readable name; the slice is
// indexed by the yaml_event_type_t constants above.
var eventStrings = []string{
	yaml_NO_EVENT:             "none",
	yaml_STREAM_START_EVENT:   "stream start",
	yaml_STREAM_END_EVENT:     "stream end",
	yaml_DOCUMENT_START_EVENT: "document start",
	yaml_DOCUMENT_END_EVENT:   "document end",
	yaml_ALIAS_EVENT:          "alias",
	yaml_SCALAR_EVENT:         "scalar",
	yaml_SEQUENCE_START_EVENT: "sequence start",
	yaml_SEQUENCE_END_EVENT:   "sequence end",
	yaml_MAPPING_START_EVENT:  "mapping start",
	yaml_MAPPING_END_EVENT:    "mapping end",
}

// String returns a human-readable name for the event type, or an
// "unknown event" message for out-of-range values.
func (e yaml_event_type_t) String() string {
	if e < 0 || int(e) >= len(eventStrings) {
		return fmt.Sprintf("unknown event %d", e)
	}
	return eventStrings[e]
}
+
// The event structure. Which fields are meaningful depends on typ,
// as noted on each field.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	style yaml_style_t
}

// scalar_style interprets style as a scalar style (for yaml_SCALAR_EVENT).
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }

// sequence_style interprets style as a sequence style (for yaml_SEQUENCE_START_EVENT).
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }

// mapping_style interprets style as a mapping style (for yaml_MAPPING_START_EVENT).
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
// Nodes

// The standard YAML 1.1 tags, plus a few extensions.
const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)

// yaml_node_type_t identifies the kind of a composed node.
type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)

// An element of a sequence node: the id of the referenced node.
type yaml_node_item_t int

// An element of a mapping node.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}
+
// The node structure. Only one of the scalar/sequence/mapping sections is
// meaningful, selected by typ.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for yaml_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}

// The document structure.
type yaml_document_t struct {

	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}
+
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
//
// NOTE(review): the C-style contract above is historical; the Go signature
// returns the byte count and an error instead — confirm the EOF convention
// (io.EOF vs (0, nil)) at the call site.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)

// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}
+
// The states of the parser.
type yaml_parser_state_t int

const (
	// Expect STREAM-START.
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)
+
// String returns the constant name of the parser state for debugging, or
// the empty string for an unknown value.
func (ps yaml_parser_state_t) String() string {
	switch ps {
	case yaml_PARSE_STREAM_START_STATE:
		return "yaml_PARSE_STREAM_START_STATE"
	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_START_STATE:
		return "yaml_PARSE_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
	case yaml_PARSE_DOCUMENT_END_STATE:
		return "yaml_PARSE_DOCUMENT_END_STATE"
	case yaml_PARSE_BLOCK_NODE_STATE:
		return "yaml_PARSE_BLOCK_NODE_STATE"
	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
	case yaml_PARSE_FLOW_NODE_STATE:
		return "yaml_PARSE_FLOW_NODE_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
	case yaml_PARSE_END_STATE:
		return "yaml_PARSE_END_STATE"
	}
	return ""
}
+
// This structure holds aliases data.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}

// yaml_comment_t records a comment together with the position where it
// was found.
type yaml_comment_t struct {
	mark yaml_mark_t // Where the comment starts.
	text string      // The comment text.
}
+
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	filename string // Name of the input source, if any (NOTE(review): presumably used in error reporting — confirm at call sites).

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // File input data.
	input        []byte    // String input data.
	input_pos    int       // Current position within input (string input mode).

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	comment_buffer []byte // Accumulated comment bytes (presumably flushed into comments — confirm in the scanner).

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	linesSinceLast  int // Lines consumed since the last point of interest (not verified here).
	spacesSinceLast int // Spaces consumed since the last point of interest (not verified here).

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	comments []yaml_comment_t // Comments collected while scanning.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}
+
// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
// NOTE(review): the C-style contract above is historical; the Go signature
// reports failure via the returned error instead.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error

// yaml_emitter_state_t identifies what the emitter expects next.
type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)
+
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_writer io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // If the output is in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // If the last character was a whitespace?
	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
	open_ended bool // If an explicit document end is required?

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Dumper stuff

	opened bool // If the stream was already opened?
	closed bool // If the stream was already closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // If the node has been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go b/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go
new file mode 100644
index 0000000000..8110ce3c37
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer
	// (the factor of 3 presumably covers worst-case expansion when
	// transcoding to UTF-8 — confirm against the reader).
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)
+
// is_alpha reports whether the byte at position i is an ASCII letter,
// a decimal digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
	c := b[i]
	switch {
	case '0' <= c && c <= '9', 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
		return true
	}
	return c == '_' || c == '-'
}
+
// is_digit reports whether the byte at position i is an ASCII decimal digit.
func is_digit(b []byte, i int) bool {
	c := b[i]
	return '0' <= c && c <= '9'
}
+
// as_digit returns the numeric value of the ASCII decimal digit at
// position i. The caller is expected to have checked is_digit first.
func as_digit(b []byte, i int) int {
	c := int(b[i])
	return c - '0'
}
+
// is_hex reports whether the byte at position i is an ASCII hexadecimal
// digit (0-9, A-F, a-f).
func is_hex(b []byte, i int) bool {
	switch c := b[i]; {
	case '0' <= c && c <= '9', 'A' <= c && c <= 'F', 'a' <= c && c <= 'f':
		return true
	default:
		return false
	}
}
+
// as_hex returns the numeric value of the ASCII hexadecimal digit at
// position i. The caller is expected to have checked is_hex first.
func as_hex(b []byte, i int) int {
	switch c := b[i]; {
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	default:
		return int(c) - '0'
	}
}
+
// is_ascii reports whether the byte at position i is a 7-bit ASCII
// character.
func is_ascii(b []byte, i int) bool {
	return b[i] < 0x80
}
+
// Check if the character at the start of the buffer can be printed unescaped.
// Note: for multi-byte UTF-8 sequences this reads up to b[i+2], so the
// caller must ensure enough bytes are buffered past position i.
func is_printable(b []byte, i int) bool {
	return ((b[i] == 0x0A) || // . == #x0A
		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
		(b[i] > 0xC2 && b[i] < 0xED) ||
		(b[i] == 0xED && b[i+1] < 0xA0) ||
		(b[i] == 0xEE) ||
		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
+
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
	return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
// NOTE(review): the position argument i is ignored; this always inspects
// b[0:3] regardless of i.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
	return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
	return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
	//return is_space(b, i) || is_tab(b, i)
	return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
// Multi-byte breaks (NEL, LS, PS) read up to b[i+2].
func is_break(b []byte, i int) bool {
	return (b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

// Check if the bytes at the specified position form a CR LF pair.
func is_crlf(b []byte, i int) bool {
	return b[i] == '\r' && b[i+1] == '\n'
}
+
// Check if the character is a line break or NUL.
// The helpers it combines are inlined by hand (see the commented-out form).
func is_breakz(b []byte, i int) bool {
	//return is_break(b, i) || is_z(b, i)
	return ( // is_break:
	b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		// is_z:
		b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
// The helpers it combines are inlined by hand (see the commented-out form).
func is_spacez(b []byte, i int) bool {
	//return is_space(b, i) || is_breakz(b, i)
	return ( // is_space:
	b[i] == ' ' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}

// Check if the character is a line break, space, tab, or NUL.
// The helpers it combines are inlined by hand (see the commented-out form).
func is_blankz(b []byte, i int) bool {
	//return is_blank(b, i) || is_breakz(b, i)
	return ( // is_blank:
	b[i] == ' ' || b[i] == '\t' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}
+
// width returns the length in bytes of the UTF-8 sequence whose leading
// byte is b, or 0 if b is not a valid leading byte (e.g. a continuation
// byte 10xxxxxx).
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 {
		return 1
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0
}
diff --git a/vendor/cuelang.org/go/internal/types/value.go b/vendor/cuelang.org/go/internal/types/value.go
new file mode 100644
index 0000000000..7b0301f773
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/types/value.go
@@ -0,0 +1,37 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/runtime"
+)
+
// Value is the internal representation of a cue value: the runtime it
// belongs to paired with the underlying adt vertex.
type Value struct {
	R *runtime.Runtime
	V *adt.Vertex
}

// Interface is implemented by values that can expose their internal
// representation by filling in the provided *Value.
type Interface interface {
	Core(v *Value)
}
+
+func CastValue(t *Value, x interface{}) bool {
+ c, ok := x.(Interface)
+ if ok {
+ c.Core(t)
+ }
+ return ok
+}
diff --git a/vendor/cuelang.org/go/internal/value/value.go b/vendor/cuelang.org/go/internal/value/value.go
new file mode 100644
index 0000000000..a889738517
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/value/value.go
@@ -0,0 +1,97 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package value contains functions for converting values to internal types
+// and various other Value-related utilities.
+package value
+
+import (
+ "strings"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/internal/core/convert"
+ "cuelang.org/go/internal/core/eval"
+ "cuelang.org/go/internal/core/runtime"
+ "cuelang.org/go/internal/types"
+)
+
+func ConvertToRuntime(c *cue.Context) *cue.Runtime {
+ return (*cue.Runtime)(c)
+}
+
+func ConvertToContext(r *cue.Runtime) *cue.Context {
+ (*runtime.Runtime)(r).Init()
+ return (*cue.Context)(r)
+}
+
+func ToInternal(v cue.Value) (*runtime.Runtime, *adt.Vertex) {
+ var t types.Value
+ v.Core(&t)
+ return t.R, t.V
+}
+
+// Make wraps cue.MakeValue.
+func Make(ctx *adt.OpContext, v adt.Value) cue.Value {
+ return (*cue.Context)(ctx.Impl().(*runtime.Runtime)).Encode(v)
+}
+
+func MakeError(r *runtime.Runtime, err errors.Error) cue.Value {
+ b := &adt.Bottom{Err: err}
+ node := &adt.Vertex{BaseValue: b}
+ node.UpdateStatus(adt.Finalized)
+ node.AddConjunct(adt.MakeRootConjunct(nil, b))
+ return (*cue.Context)(r).Encode(node)
+}
+
+// UnifyBuiltin returns the given Value unified with the given builtin template.
+func UnifyBuiltin(v cue.Value, kind string) cue.Value {
+ p := strings.Split(kind, ".")
+ pkg, name := p[0], p[1]
+ s := runtime.SharedRuntime.LoadImport(pkg)
+ if s == nil {
+ return v
+ }
+
+ ctx := v.Context()
+ a := s.Lookup((*runtime.Runtime)(ctx).Label(name, false))
+ if a == nil {
+ return v
+ }
+
+ return v.Unify(ctx.Encode(a))
+}
+
+func FromGoValue(r *cue.Context, x interface{}, nilIsTop bool) cue.Value {
+ rt := (*runtime.Runtime)(r)
+ rt.Init()
+ ctx := eval.NewContext(rt, nil)
+ v := convert.GoValueToValue(ctx, x, nilIsTop)
+ n := adt.ToVertex(v)
+ return r.Encode(n)
+}
+
+func FromGoType(r *cue.Context, x interface{}) cue.Value {
+ rt := (*runtime.Runtime)(r)
+ rt.Init()
+ ctx := eval.NewContext(rt, nil)
+ expr, err := convert.GoTypeToExpr(ctx, x)
+ if err != nil {
+ expr = &adt.Bottom{Err: err}
+ }
+ n := &adt.Vertex{}
+ n.AddConjunct(adt.MakeRootConjunct(nil, expr))
+ return r.Encode(n)
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/ed25519/ed25519.go b/vendor/cuelang.org/go/pkg/crypto/ed25519/ed25519.go
new file mode 100644
index 0000000000..27d3a33550
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/ed25519/ed25519.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ed25519
+
+import (
+ "crypto/ed25519"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+const (
+ // PublicKeySize is the size of a public key in bytes.
+ PublicKeySize = 32
+)
+
+// Valid verifies the provided signature of the message using the public key.
+// An error is returned if and only if an invalid public key is provided.
+func Valid(publicKey, message, signature []byte) (bool, error) {
+ if size := len(publicKey); size != PublicKeySize {
+ return false, errors.Newf(token.NoPos, "ed25519: publicKey must be 32 bytes")
+ }
+ return ed25519.Verify(publicKey, message, signature), nil
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/ed25519/pkg.go b/vendor/cuelang.org/go/pkg/crypto/ed25519/pkg.go
new file mode 100644
index 0000000000..9b10be43db
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/ed25519/pkg.go
@@ -0,0 +1,38 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package ed25519
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/ed25519", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "PublicKeySize",
+ Const: "32",
+ }, {
+ Name: "Valid",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ {Kind: adt.BytesKind | adt.StringKind},
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BoolKind,
+ Func: func(c *internal.CallCtxt) {
+ publicKey, message, signature := c.Bytes(0), c.Bytes(1), c.Bytes(2)
+ if c.Do() {
+ c.Ret, c.Err = Valid(publicKey, message, signature)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/hmac/hmac.go b/vendor/cuelang.org/go/pkg/crypto/hmac/hmac.go
new file mode 100644
index 0000000000..bbd8556d75
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/hmac/hmac.go
@@ -0,0 +1,77 @@
+// Copyright 2021 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package hmac implements the Keyed-Hash Message Authentication Code (HMAC) as
+// defined in U.S. Federal Information Processing Standards Publication 198.
+//
+// An HMAC is a cryptographic hash that uses a key to sign a message.
+// The receiver verifies the hash by recomputing it using the same key.
+package hmac
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+)
+
+const (
+ MD5 = "MD5"
+ SHA1 = "SHA1"
+ SHA224 = "SHA224"
+ SHA256 = "SHA256"
+ SHA384 = "SHA384"
+ SHA512 = "SHA512"
+ SHA512_224 = "SHA512_224"
+ SHA512_256 = "SHA512_256"
+)
+
+// Sign returns the HMAC signature of the data, using the provided key and hash function.
+//
+// Supported hash functions: "MD5", "SHA1", "SHA224", "SHA256", "SHA384", "SHA512", "SHA512_224",
+// and "SHA512_256".
+func Sign(hashName string, key []byte, data []byte) ([]byte, error) {
+ hash, err := hashFromName(hashName)
+ if err != nil {
+ return nil, err
+ }
+ mac := hmac.New(hash, key)
+ mac.Write(data)
+ return mac.Sum(nil), nil
+}
+
+func hashFromName(hash string) (func() hash.Hash, error) {
+ switch hash {
+ case MD5:
+ return md5.New, nil
+ case SHA1:
+ return sha1.New, nil
+ case SHA224:
+ return sha256.New224, nil
+ case SHA256:
+ return sha256.New, nil
+ case SHA384:
+ return sha512.New384, nil
+ case SHA512:
+ return sha512.New, nil
+ case SHA512_224:
+ return sha512.New512_224, nil
+ case SHA512_256:
+ return sha512.New512_256, nil
+ }
+ return nil, fmt.Errorf("unsupported hash function")
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/hmac/pkg.go b/vendor/cuelang.org/go/pkg/crypto/hmac/pkg.go
new file mode 100644
index 0000000000..98d72c9841
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/hmac/pkg.go
@@ -0,0 +1,59 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package hmac
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/hmac", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "MD5",
+ Const: "\"MD5\"",
+ }, {
+ Name: "SHA1",
+ Const: "\"SHA1\"",
+ }, {
+ Name: "SHA224",
+ Const: "\"SHA224\"",
+ }, {
+ Name: "SHA256",
+ Const: "\"SHA256\"",
+ }, {
+ Name: "SHA384",
+ Const: "\"SHA384\"",
+ }, {
+ Name: "SHA512",
+ Const: "\"SHA512\"",
+ }, {
+ Name: "SHA512_224",
+ Const: "\"SHA512_224\"",
+ }, {
+ Name: "SHA512_256",
+ Const: "\"SHA512_256\"",
+ }, {
+ Name: "Sign",
+ Params: []internal.Param{
+ {Kind: adt.StringKind},
+ {Kind: adt.BytesKind | adt.StringKind},
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ hashName, key, data := c.String(0), c.Bytes(1), c.Bytes(2)
+ if c.Do() {
+ c.Ret, c.Err = Sign(hashName, key, data)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/md5/md5.go b/vendor/cuelang.org/go/pkg/crypto/md5/md5.go
new file mode 100644
index 0000000000..d52c528a6c
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/md5/md5.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package md5
+
+import "crypto/md5"
+
+// The size of an MD5 checksum in bytes.
+const Size = 16
+
+// The blocksize of MD5 in bytes.
+const BlockSize = 64
+
+// Sum returns the MD5 checksum of the data.
+func Sum(data []byte) []byte {
+ a := md5.Sum(data)
+ return a[:]
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/md5/pkg.go b/vendor/cuelang.org/go/pkg/crypto/md5/pkg.go
new file mode 100644
index 0000000000..e4bbcda317
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/md5/pkg.go
@@ -0,0 +1,39 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package md5
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/md5", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "Size",
+ Const: "16",
+ }, {
+ Name: "BlockSize",
+ Const: "64",
+ }, {
+ Name: "Sum",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum(data)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha1/pkg.go b/vendor/cuelang.org/go/pkg/crypto/sha1/pkg.go
new file mode 100644
index 0000000000..9792024976
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha1/pkg.go
@@ -0,0 +1,39 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package sha1
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/sha1", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "Size",
+ Const: "20",
+ }, {
+ Name: "BlockSize",
+ Const: "64",
+ }, {
+ Name: "Sum",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum(data)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha1/sha1.go b/vendor/cuelang.org/go/pkg/crypto/sha1/sha1.go
new file mode 100644
index 0000000000..de6a8b9c7c
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha1/sha1.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha1
+
+import "crypto/sha1"
+
+// The size of a SHA-1 checksum in bytes.
+const Size = 20
+
+// The blocksize of SHA-1 in bytes.
+const BlockSize = 64
+
+// Sum returns the SHA-1 checksum of the data.
+func Sum(data []byte) []byte {
+ a := sha1.Sum(data)
+ return a[:]
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha256/pkg.go b/vendor/cuelang.org/go/pkg/crypto/sha256/pkg.go
new file mode 100644
index 0000000000..92d851dfb5
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha256/pkg.go
@@ -0,0 +1,54 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package sha256
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/sha256", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "Size",
+ Const: "32",
+ }, {
+ Name: "Size224",
+ Const: "28",
+ }, {
+ Name: "BlockSize",
+ Const: "64",
+ }, {
+ Name: "Sum256",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum256(data)
+ }
+ },
+ }, {
+ Name: "Sum224",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum224(data)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha256/sha256.go b/vendor/cuelang.org/go/pkg/crypto/sha256/sha256.go
new file mode 100644
index 0000000000..3320a06b16
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha256/sha256.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha256
+
+import "crypto/sha256"
+
+// The size of a SHA256 checksum in bytes.
+const Size = 32
+
+// The size of a SHA224 checksum in bytes.
+const Size224 = 28
+
+// The blocksize of SHA256 and SHA224 in bytes.
+const BlockSize = 64
+
+// Sum256 returns the SHA256 checksum of the data.
+func Sum256(data []byte) []byte {
+ a := sha256.Sum256(data)
+ return a[:]
+}
+
+// Sum224 returns the SHA224 checksum of the data.
+func Sum224(data []byte) (sum224 []byte) {
+ a := sha256.Sum224(data)
+ return a[:]
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha512/pkg.go b/vendor/cuelang.org/go/pkg/crypto/sha512/pkg.go
new file mode 100644
index 0000000000..607da475d6
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha512/pkg.go
@@ -0,0 +1,84 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package sha512
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("crypto/sha512", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "Size",
+ Const: "64",
+ }, {
+ Name: "Size224",
+ Const: "28",
+ }, {
+ Name: "Size256",
+ Const: "32",
+ }, {
+ Name: "Size384",
+ Const: "48",
+ }, {
+ Name: "BlockSize",
+ Const: "128",
+ }, {
+ Name: "Sum512",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum512(data)
+ }
+ },
+ }, {
+ Name: "Sum384",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum384(data)
+ }
+ },
+ }, {
+ Name: "Sum512_224",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum512_224(data)
+ }
+ },
+ }, {
+ Name: "Sum512_256",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Sum512_256(data)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/crypto/sha512/sha512.go b/vendor/cuelang.org/go/pkg/crypto/sha512/sha512.go
new file mode 100644
index 0000000000..c010e5306c
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/crypto/sha512/sha512.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha512
+
+import "crypto/sha512"
+
+const (
+ // Size is the size, in bytes, of a SHA-512 checksum.
+ Size = 64
+
+ // Size224 is the size, in bytes, of a SHA-512/224 checksum.
+ Size224 = 28
+
+ // Size256 is the size, in bytes, of a SHA-512/256 checksum.
+ Size256 = 32
+
+ // Size384 is the size, in bytes, of a SHA-384 checksum.
+ Size384 = 48
+
+ // BlockSize is the block size, in bytes, of the SHA-512/224,
+ // SHA-512/256, SHA-384 and SHA-512 hash functions.
+ BlockSize = 128
+)
+
+// Sum512 returns the SHA512 checksum of the data.
+func Sum512(data []byte) []byte {
+ a := sha512.Sum512(data)
+ return a[:]
+}
+
+// Sum384 returns the SHA384 checksum of the data.
+func Sum384(data []byte) (sum384 []byte) {
+ a := sha512.Sum384(data)
+ return a[:]
+}
+
+// Sum512_224 returns the Sum512/224 checksum of the data.
+func Sum512_224(data []byte) (sum224 []byte) {
+ a := sha512.Sum512_224(data)
+ return a[:]
+}
+
+// Sum512_256 returns the Sum512/256 checksum of the data.
+func Sum512_256(data []byte) (sum256 []byte) {
+ a := sha512.Sum512_256(data)
+ return a[:]
+}
diff --git a/vendor/cuelang.org/go/pkg/doc.go b/vendor/cuelang.org/go/pkg/doc.go
new file mode 100644
index 0000000000..307e5cbf1b
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/doc.go
@@ -0,0 +1,38 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pkg define CUE standard packages.
+//
+// Many of the standard packages are modeled after and generated from the Go
+// core packages. The types, values, and functions are defined as their Go
+// equivalence and mapped to CUE types.
+//
+// Beware that some packages are defined in lesser-precision types than are
+// typically used in CUE and thus may lead to loss of precision.
+//
+// All packages except those defined in the tool subdirectory are hermetic,
+// that is depending only on a known set of inputs, and therefore can guarantee
+// reproducible results. That is:
+//
+// - no reading of files contents
+// - no querying of the file system of any kind
+// - no communication on the network
+// - no information about the type of environment
+// - only reproduceable random generators
+//
+// Hermetic configurations allow for fast and advanced analysis that otherwise
+// would not be possible or practical. The cue "cmd" command can be used to mix
+// in non-hermetic influences into configurations by using packages defined
+// in the tool subdirectory.
+package pkg
diff --git a/vendor/cuelang.org/go/pkg/encoding/base64/manual.go b/vendor/cuelang.org/go/pkg/encoding/base64/manual.go
new file mode 100644
index 0000000000..f5f3aaebe5
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/base64/manual.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package base64 implements base64 encoding as specified by RFC 4648.
+package base64
+
+import (
+ "encoding/base64"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// EncodedLen returns the length in bytes of the base64 encoding
+// of an input buffer of length n. Encoding needs to be set to null
+// as only StdEncoding is supported for now.
+func EncodedLen(encoding cue.Value, n int) (int, error) {
+ if err := encoding.Null(); err != nil {
+ return 0, errors.Wrapf(err, token.NoPos, "base64: unsupported encoding")
+ }
+ return base64.StdEncoding.EncodedLen(n), nil
+}
+
+// DecodedLen returns the maximum length in bytes of the decoded data
+// corresponding to n bytes of base64-encoded data. Encoding needs to be set to
+// null as only StdEncoding is supported for now.
+func DecodedLen(encoding cue.Value, x int) (int, error) {
+ if err := encoding.Null(); err != nil {
+ return 0, errors.Wrapf(err, token.NoPos, "base64: unsupported encoding")
+ }
+ return base64.StdEncoding.DecodedLen(x), nil
+}
+
+// Encode returns the base64 encoding of src. Encoding needs to be set to null
+// as only StdEncoding is supported for now.
+func Encode(encoding cue.Value, src []byte) (string, error) {
+ if err := encoding.Null(); err != nil {
+ return "", errors.Wrapf(err, token.NoPos, "base64: unsupported encoding")
+ }
+ return base64.StdEncoding.EncodeToString(src), nil
+}
+
+// Decode returns the bytes represented by the base64 string s. Encoding needs
+// to be set to null as only StdEncoding is supported for now.
+func Decode(encoding cue.Value, s string) ([]byte, error) {
+ if err := encoding.Null(); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "base64: unsupported encoding")
+ }
+ return base64.StdEncoding.DecodeString(s)
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/base64/pkg.go b/vendor/cuelang.org/go/pkg/encoding/base64/pkg.go
new file mode 100644
index 0000000000..12f791c4c0
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/base64/pkg.go
@@ -0,0 +1,73 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package base64
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("encoding/base64", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "EncodedLen",
+ Params: []internal.Param{
+ {Kind: adt.TopKind},
+ {Kind: adt.IntKind},
+ },
+ Result: adt.IntKind,
+ Func: func(c *internal.CallCtxt) {
+ encoding, n := c.Value(0), c.Int(1)
+ if c.Do() {
+ c.Ret, c.Err = EncodedLen(encoding, n)
+ }
+ },
+ }, {
+ Name: "DecodedLen",
+ Params: []internal.Param{
+ {Kind: adt.TopKind},
+ {Kind: adt.IntKind},
+ },
+ Result: adt.IntKind,
+ Func: func(c *internal.CallCtxt) {
+ encoding, x := c.Value(0), c.Int(1)
+ if c.Do() {
+ c.Ret, c.Err = DecodedLen(encoding, x)
+ }
+ },
+ }, {
+ Name: "Encode",
+ Params: []internal.Param{
+ {Kind: adt.TopKind},
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ encoding, src := c.Value(0), c.Bytes(1)
+ if c.Do() {
+ c.Ret, c.Err = Encode(encoding, src)
+ }
+ },
+ }, {
+ Name: "Decode",
+ Params: []internal.Param{
+ {Kind: adt.TopKind},
+ {Kind: adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ encoding, s := c.Value(0), c.String(1)
+ if c.Do() {
+ c.Ret, c.Err = Decode(encoding, s)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/csv/manual.go b/vendor/cuelang.org/go/pkg/encoding/csv/manual.go
new file mode 100644
index 0000000000..99e2cf1df5
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/csv/manual.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package csv
+
+import (
+ "bytes"
+ "encoding/csv"
+ "io"
+
+ "cuelang.org/go/cue"
+)
+
+// Encode encode the given list of lists to CSV.
+func Encode(x cue.Value) (string, error) {
+ buf := &bytes.Buffer{}
+ w := csv.NewWriter(buf)
+ iter, err := x.List()
+ if err != nil {
+ return "", err
+ }
+ for iter.Next() {
+ row, err := iter.Value().List()
+ if err != nil {
+ return "", err
+ }
+ a := []string{}
+ for row.Next() {
+ col := row.Value()
+ if str, err := col.String(); err == nil {
+ a = append(a, str)
+ } else {
+ b, err := col.MarshalJSON()
+ if err != nil {
+ return "", err
+ }
+ a = append(a, string(b))
+ }
+ }
+ _ = w.Write(a)
+ }
+ w.Flush()
+ return buf.String(), nil
+}
+
+// Decode reads in a csv into a list of lists.
+func Decode(r io.Reader) ([][]string, error) {
+ return csv.NewReader(r).ReadAll()
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/csv/pkg.go b/vendor/cuelang.org/go/pkg/encoding/csv/pkg.go
new file mode 100644
index 0000000000..f5c5482eaa
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/csv/pkg.go
@@ -0,0 +1,45 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package csv
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("encoding/csv", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "Encode",
+ Params: []internal.Param{
+ {Kind: adt.TopKind},
+ },
+ Result: adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ x := c.Value(0)
+ if c.Do() {
+ c.Ret, c.Err = Encode(x)
+ }
+ },
+ }, {
+ Name: "Decode",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.ListKind,
+ Func: func(c *internal.CallCtxt) {
+ r := c.Reader(0)
+ if c.Do() {
+ c.Ret, c.Err = Decode(r)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/hex/hex.go b/vendor/cuelang.org/go/pkg/encoding/hex/hex.go
new file mode 100644
index 0000000000..da6d820efa
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/hex/hex.go
@@ -0,0 +1,51 @@
+// Copyright 2020 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generated with go run cuelang.org/go/internal/cmd/qgo -stripstr -exclude=Decode$,Encode$,EncodeToString,Dumper extract encoding/hex
+
+package hex
+
+import "encoding/hex"
+
+// EncodedLen returns the length of an encoding of n source bytes.
+// Specifically, it returns n * 2.
+func EncodedLen(n int) int {
+ return hex.EncodedLen(n)
+}
+
+// DecodedLen returns the length of a decoding of x source bytes.
+// Specifically, it returns x / 2.
+func DecodedLen(x int) int {
+ return hex.DecodedLen(x)
+}
+
+// Decode returns the bytes represented by the hexadecimal string s.
+//
+// Decode expects that src contains only hexadecimal
+// characters and that src has even length.
+// If the input is malformed, Decode returns
+// the bytes decoded before the error.
+func Decode(s string) ([]byte, error) {
+ return hex.DecodeString(s)
+}
+
+// Dump returns a string that contains a hex dump of the given data. The format
+// of the hex dump matches the output of `hexdump -C` on the command line.
+func Dump(data []byte) string {
+ return hex.Dump(data)
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/hex/manual.go b/vendor/cuelang.org/go/pkg/encoding/hex/manual.go
new file mode 100644
index 0000000000..9f1ca552fc
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/hex/manual.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hex
+
+import "encoding/hex"
+
+// Encode returns the hexadecimal encoding of src.
+func Encode(src []byte) string {
+ return hex.EncodeToString(src)
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/hex/pkg.go b/vendor/cuelang.org/go/pkg/encoding/hex/pkg.go
new file mode 100644
index 0000000000..5918fe0c15
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/hex/pkg.go
@@ -0,0 +1,81 @@
+// Code generated by go generate. DO NOT EDIT.
+
+//go:generate rm pkg.go
+//go:generate go run ../../gen/gen.go
+
+package hex
+
+import (
+ "cuelang.org/go/internal/core/adt"
+ "cuelang.org/go/pkg/internal"
+)
+
+func init() {
+ internal.Register("encoding/hex", pkg)
+}
+
+var _ = adt.TopKind // in case the adt package isn't used
+
+var pkg = &internal.Package{
+ Native: []*internal.Builtin{{
+ Name: "EncodedLen",
+ Params: []internal.Param{
+ {Kind: adt.IntKind},
+ },
+ Result: adt.IntKind,
+ Func: func(c *internal.CallCtxt) {
+ n := c.Int(0)
+ if c.Do() {
+ c.Ret = EncodedLen(n)
+ }
+ },
+ }, {
+ Name: "DecodedLen",
+ Params: []internal.Param{
+ {Kind: adt.IntKind},
+ },
+ Result: adt.IntKind,
+ Func: func(c *internal.CallCtxt) {
+ x := c.Int(0)
+ if c.Do() {
+ c.Ret = DecodedLen(x)
+ }
+ },
+ }, {
+ Name: "Decode",
+ Params: []internal.Param{
+ {Kind: adt.StringKind},
+ },
+ Result: adt.BytesKind | adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ s := c.String(0)
+ if c.Do() {
+ c.Ret, c.Err = Decode(s)
+ }
+ },
+ }, {
+ Name: "Dump",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ data := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Dump(data)
+ }
+ },
+ }, {
+ Name: "Encode",
+ Params: []internal.Param{
+ {Kind: adt.BytesKind | adt.StringKind},
+ },
+ Result: adt.StringKind,
+ Func: func(c *internal.CallCtxt) {
+ src := c.Bytes(0)
+ if c.Do() {
+ c.Ret = Encode(src)
+ }
+ },
+ }},
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/json/json.go b/vendor/cuelang.org/go/pkg/encoding/json/json.go
new file mode 100644
index 0000000000..9e3762d76e
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/json/json.go
@@ -0,0 +1,28 @@
+// Copyright 2020 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generated with go run cuelang.org/go/internal/cmd/qgo -exclude=Compact,Indent,arshal$ extract encoding/json
+
+package json
+
+import "encoding/json"
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ return json.Valid(data)
+}
diff --git a/vendor/cuelang.org/go/pkg/encoding/json/manual.go b/vendor/cuelang.org/go/pkg/encoding/json/manual.go
new file mode 100644
index 0000000000..161c6d028e
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/encoding/json/manual.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ cuejson "cuelang.org/go/encoding/json"
+)
+
+// Compact generates the JSON-encoded src with insignificant space characters
+// elided.
+func Compact(src []byte) (string, error) {
+ dst := bytes.Buffer{}
+ if err := json.Compact(&dst, src); err != nil {
+ return "", err
+ }
+ return dst.String(), nil
+}
+
+// Indent creates an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(src []byte, prefix, indent string) (string, error) {
+ dst := bytes.Buffer{}
+ if err := json.Indent(&dst, src, prefix, indent); err != nil {
+ return "", err
+ }
+ return dst.String(), nil
+}
+
+// HTMLEscape returns the JSON-encoded src with <, >, &, U+2028 and
+// U+2029 characters inside string literals changed to \u003c, \u003e, \u0026,
+// \u2028, \u2029 so that the JSON will be safe to embed inside HTML
+
+
+
+
+